commit b50fd16591c6f2655fd32a09f534c24f4da220c7 Author: embed@git.macaw.me Date: Sat Jan 6 01:38:28 2024 +0000 first diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..cd4ac1c --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +The MIT License + +Copyright 2014 Jakub Jirutka . + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..06714c3 --- /dev/null +++ b/Makefile @@ -0,0 +1,22 @@ +# -*-mode: makefile; fill-column: 75; tab-width: 8; coding: utf-8-unix -*- + +ROLE=base +PREFIX=/usr/local +USER=`grep PRIV_SKEL_USERS_LIST /usr/local/etc/testforge/testforge.bash|sed -e 's/.*=//' -e 's/"//g'` +WD=${PWD} + +daily:: refresh +refresh:: check lint + +weekly:: update +update:: test + +check:: + sudo -u ${USER} /var/local/src/var_local_$(ROLE).bash $@ + +lint:: + sudo -u ${USER} /var/local/src/var_local_$(ROLE).bash $@ + +test:: + sudo -u ${USER} /var/local/src/var_local_$(ROLE).bash $@ + diff --git a/README.md b/README.md new file mode 100644 index 0000000..43df9e5 --- /dev/null +++ b/README.md @@ -0,0 +1,3 @@ + +This base role is from https://github.com/gentoo-ansible/role-base/ +and is just a starting point. diff --git a/defaults/main.yml b/defaults/main.yml new file mode 100755 index 0000000..21b875f --- /dev/null +++ b/defaults/main.yml @@ -0,0 +1,135 @@ +# -*- mode: yaml; indent-tabs-mode: nil; tab-width: 2; coding: utf-8-unix -*- + +--- + +# these can be overridden using --extra-vars on the playbook command line + +BASE_ROOT_USER: "{{BOX_ROOT_USER|default('root')}}" +BASE_ROOT_GROUP: "{{BOX_ROOT_GROUP|default('root')}}" + +BASE_PYTHON2_MINOR: "{{BOX_PYTHON2_MINOR}}" +BASE_PYTHON3_MINOR: "{{BOX_PYTHON3_MINOR}}" +BASE_PORTAGE_PYTHON_MINOR: "{{BOX_PORTAGE_PYTHON_MINOR}}" +BASE_IPV6_DISABLE: "{{BOX_IPV6_DISABLE}}" + +# These should come from the wrapper script +BASE_USR_LOCAL: "{{ USR_LOCAL }}" +BASE_PYVENV_ARGS: "--system-site-packages" +BASE_PYVENV2_ARGS: "{{BASE_PYVENV_ARGS}}" +BASE_PYVENV3_ARGS: "{{BASE_PYVENV_ARGS}}" + +# this is now ignored and built into pip2.sh/pip3.sh as pip:extra_args is post the install keyword +BASE_PIP_GLOBAL_ARGS: "--disable-pip-version-check --cache-dir {{ PLAY_PIP_CACHE|default('/mnt/o/Cache/Pip') }} --cert '{{ PLAY_CA_CERT|default('/usr/local/etc/ssl/cacert-testforge.pem') }}'" +# force pip packages to be explicitly installed or fail +BASE_PIP_INSTALL_ARGS: "{{ BASE_PIP_GLOBAL_ARGS +' --no-deps --prefix='+USR_LOCAL }}" +# and/or to prevent pip form downloading from the internet +#? 
BASE_PIP_INSTALL_ARGS: "{{ PIP_INSTALL_ARGS|default('--proxy=localhost:9999') }}" +BASE_USER_NAME: "{{ BOX_USER_NAME }}" +BASE_USER_HOME: "{{ BOX_USER_HOME }}" +BASE_ALSO_GROUP: "{{ BOX_ALSO_GROUP }}" +# These should come from the inventory hosts.yml +HOST_MOUNT_SYMLINKS: [] +HOST_MOUNT_SYMLINK_CONTENTS: {} +HOST_CONTAINER_MOUNTS: [] +BASE_HOST_CONTAINER_MOUNTS: "{{BOX_HOST_CONTAINER_MOUNTS}}" +HOSTNAME_HARDWARE: '' +BASE_ALSO_USERS: "{{BOX_ALSO_USERS}}" +LXD_TRUST_PASSWORD: "" + +# per-user config dir but expanduser is broken so we leave off the ~/ prefix +BASE_USER_CONFIG_DIR: ".config/testforge" + +# unused - seems to be built for zip only not tar +BASE_UNTAR_ARGS: +#? - "-C" + - "--owner=root" + - "--group=root" + - "--no-same-owner" + - "--keep-newer-files" +# - "--no-same-permissions" + +BASE_PKG_IGNORE_ERRORS: true # "{{ ansible_virtualization_role|replace('NA', 'host') == 'guest' }}" +BASE_IGNORE_LOCAL_ERRORS: BASE_ARE_CONNECTED|default('') == '' + +# Look for you timezone in /usr/share/zoneinfo. +# Example: Europe/Prague +BASE_timezone: "{{BOX_timezone}}" + +# Is your hardware clock set local (true), or UTC (false)? - true is advised by created problems DB +hwclock_local: "{{BOX_hwclock_local}}" + +# Do you want to set the hardware clock to the current system time (software +# clock) during shutdown? +hwclock_systohc: "{{BOX_hwclock_systohc}}" + +# Do you want to set the system time to the current hardware clock +# during bootup? +hwclock_hctosys: "{{BOX_hwclock_hctosys}}" + +# Should be copies of the /etc/skel files inside the /home/${USER} updated? +# Only files that were not modified by a user will be updated. +skel_update_homes: false + +# List of locales to generate. +env_locales: + - "C.UTF-8 UTF-8" + - "en_US.UTF-8 UTF-8" + - "en_GB.UTF-8 UTF-8" + - "en_CA.UTF-8 UTF-8" + +# The default system LANG. +env_locale_lang: en_US.UTF-8 + +# The default EDITOR. +env_editor: "/usr/bin/mg" + +BASE_BOOT_DIR: "/boot" +#BASE_BOOT_DIR: "/mnt/l/syslinux" +BASE_ROOT_LOG_DIR: "/root/var/tmp/{{date_slash}}" + +BASE_SCRIPT_DIR: "{{USR_LOCAL}}/sbin" +BASE_LOG_DIR: "{{USR_LOCAL}}/var/log" + +# subkeys.pgp.net does NOT work +BASE_GENTOO_KEYSERVER: "keys.gentoo.org" +BASE_GPG_SERVER: "{{BOX_GPG_SERVER}}" +# I tried these four, I believe that number 3 finally worked and allowed me to download the keys in question for accessing the tor update servers. +# https://github.com/Stadicus/RaspiBolt/issues/343 +# hkps.pool.sks-keyservers.net +# keys.gnupg.net +# pgp.uni-mainz.de +# pgp.mit.edu + +BASE_FUNTOO_PROFILES_DIRS: + - /var/local/git/meta-repo/kits/core-kit/profiles/base + +BASE_FUNTOO_MIXINS_DIRS: + - /var/local/git/meta-repo/kits/core-kit/profiles/funtoo/1.0/linux-gnu/mix-ins + +BASE_FUNTOO_MIXINS: + - X + - xfce + - no-systemd + +# This now gets overwritten in firewall.conf from route|grep ^default +# should get replaced by BASE_ARE_CONNECTED if it's not null +BASE_DEFAULT_OUTPUT_IF: "{{BOX_DEFAULT_OUTPUT_IF|default('wlan7')}}" + +BASE_FEATURES: "{{BOX_BASE_FEATURES}}" +BASE_LIB: "{{BOX_USR_LIB}}" +LIB: "{{BASE_LIB}}" + +base_ipv4_nat_dev: "{{ ansible_default_ipv4.alias }}" # enp0s3 +#? ansible_default_ipv4.address +base_ipv4_nat_ip: "{{ ansible_default_ipv4.address }}" # 10.0.2.15" +base_ipv4_nat_gw_ip: "{{ ansible_default_ipv4.gateway }}" # "10.0.2.2" +# "ansible_dns": {"nameservers": [ "10.0.2.2" +base_ipv4_nat_dns_ip: "{{ ansible_dns[0].nameseervers[0] }}" #? 
"10.0.2.3" + +BASE_PASSWORDS_DATABASE: "" +BASE_PASSWORDS_PASSWORD: "" +BASE_HOSTNAME_HARDWARE: "{{BOX_HOST_NAME}}" +BASE_TIMEZONE: Etc/UTC + +base_passwords_database: "{{box_passwords_database}}" + diff --git a/files/find_skels_to_update.sh b/files/find_skels_to_update.sh new file mode 100644 index 0000000..2c555ce --- /dev/null +++ b/files/find_skels_to_update.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +filename="$1" + +for homedir in /home/*; do + if cmp "/etc/skel/$filename" "$homedir/$filename" &>/dev/null; then + owner_group=$(find $homedir -maxdepth 0 -printf '%u:%g') + echo "$filename:$homedir:${owner_group%:*}:${owner_group#*:}" + fi +done diff --git a/handlers/main.yml b/handlers/main.yml new file mode 100644 index 0000000..a4ff98f --- /dev/null +++ b/handlers/main.yml @@ -0,0 +1,151 @@ +# -*- mode: yaml; indent-tabs-mode: nil; tab-width: 2; coding: utf-8-unix -*- + +--- + +- name: update facts + shell: | + # {{USR_LOCAL}}/bin/testforge_local_bin.bash + true + +# old base +- name: "update locales" + command: | + locale-gen --update + +- name: "update env" + command: | + env-update + +- name: "update eix" + # now done monthly + command: | + echo eix-update + +- name: "update peruser facts" + # broken this is yaml + # /usr/local/bin/testforge_local_bin.bash {{item|expanduser}} + command: | + true + with_items: + - "~{{BOX_USER_NAME}}/{{ BASE_USER_CONFIG_DIR }}/facts.d" + +# the tar step just before this may have added certificates to +# /usr/local/share and we may need to run this. Run it anyway +- name: "update-ca-certificates" + shell: | + # morons: this return rc=0 even when there is an exception - with java7 - + # org.debian.security.InvalidKeystorePasswordException: Cannot open Java keystore. Is the password correct? + update-ca-certificates -f -v > /tmp/U$$.log 2>&1 + retval=$? + [ $retval -eq 0 ] && exit 0 + if grep Exception: /tmp/U$$.log ; then + retval=1 + cat /tmp/$$.log + else + retval=0 + fi + exit $retval + +# FixMe: was unused +- name: "chmod /usr/local" + shell: | + # there are some exceptions in usr/local + # chown -R "{{BOX_USER_NAME}}.{{BOX_ALSO_GROUP}}" {{USR_LOCAL}} + chmod -R g+rw,o-w {{USR_LOCAL}} + # was TESTF_ + # this should not be needed but it still is because we execute src/var_local_*sh + # as BOX_USER so they need to be writable - *before* the src/ files are run. + chmod a+x {{VAR_LOCAL}}/*bin/*sh {{VAR_LOCAL}}/src/*sh + chmod g+rw {{USR_LOCAL}}/*bin/*sh {{USR_LOCAL}}/src/*sh + + # was unused - not +- name: chmod /usr/local + shell: | + # do I need this? should it be in hourly? it breaks lynis but I think we still need it + #? chmod -R g+rw,o-w {{USR_LOCAL}} + # FixMe: it breaks lynis - fix it here + chown -R "{{BOX_USER_NAME}}.{{BOX_ALSO_GROUP}}" \ + {{USR_LOCAL}}/src \ + {{USR_LOCAL}}/share \ + {{USR_LOCAL}}/bin \ + {{USR_LOCAL}}/{{BASE_LIB}} \ + {{USR_LOCAL}}/net + exit 0 + +# was in testforge - to fix problems caused by taring +- name: chmod /var/local + shell: | + # do I need this? should it be in hourly? it breaks lynis but I think we still need it + #? chmod -R g+rw,o-w {{VAR_LOCAL}} + # FixMe: it breaks lynis - fix it here + chown -R "{{BOX_USER_NAME}}.{{BOX_ALSO_GROUP}}" \ + {{VAR_LOCAL}}/src \ + {{VAR_LOCAL}}/share \ + {{VAR_LOCAL}}/bin \ + {{VAR_LOCAL}}/{{BASE_LIB}} \ + {{VAR_LOCAL}}/net + exit 0 + +# FixMe: somethings strange in /var/local/etc +# drwxr-xr-x 2 1056888 1049089 4096 Nov 30 13:38 w3af + +# FixMe: was PENT_ +# FixMe: unused +- name: "strange ownership - coming from tar?" 
+ shell: | + chown -R {{BOX_USER_NAME}}.{{BOX_ALSO_GROUP}} {{VAR_LOCAL}}/src {{VAR_LOCAL}}/bin {{VAR_LOCAL}}/net + + +- name: summary of logs + debug: + # lookup('vars', item ) returns a list of strings?; map yields generators + # |map(attribute='stdout')|list|join('\n') -> 'str object' has no attribute 'stdout' + msg: "{{ lookup('vars', item ) }}" + when: + - lookup('vars', item, default=[])|length > 0 + # base proxy + with_items: + # proxy + - proxy_log_hourly + # testforge + - testforge_log_hourly + - testforge_log_daily + - testforge_log_weekly + - testforge_log_monthly + - testforge_log_test + # pydev + - pydev_log_test + # text + - text_log_test + - text_log_hourly + # testing + - testing_log_test + # harden + - harden_log_hourly + - harden_log_daily + - harden_log_weekly + - harden_log_monthly + - harden_log_test + # logging + - logging_log_daily + - logging_log_test + # hostvms + - hostvms_log_bin + - hostvms_log_test + # pentest + - pentest_log_weekly + - pentest_log_test + # privacy + - privacy_log_hourly + - privacy_log_daily + - privacy_log_weekly + - privacy_log_monthly + - privacy_log_test + # gpgkey + - gpgkey_log_weekly + # trading + - trading_log_test + # update + - update_log_daily + - update_log_test + diff --git a/meta/main.yml b/meta/main.yml new file mode 100644 index 0000000..fde74a6 --- /dev/null +++ b/meta/main.yml @@ -0,0 +1,13 @@ +--- +galaxy_info: + author: Jakub Jirutka + company: CTU in Prague + description: Base system setup + license: MIT + min_ansible_version: 2.3 # maybe - curently tested under 2.7 + platforms: + - name: "Gentoo" + categories: + - system + +dependencies: [] diff --git a/overlay/Debian/usr/local/bin/de_bootstrap_gateway.bash b/overlay/Debian/usr/local/bin/de_bootstrap_gateway.bash new file mode 100755 index 0000000..fe6efc7 --- /dev/null +++ b/overlay/Debian/usr/local/bin/de_bootstrap_gateway.bash @@ -0,0 +1,15 @@ +#!/bin/bash +# -*-mode: sh; tab-width: 8; coding: utf-8-unix -*- + +PREFIX=/usr/local + +ROLE=base + +local_guest_fstab_mnt + +. /usr/local/etc/local.d/local.bash || exit 1 + +local_guest || exit 3$? + +#? local_guest_add_xorg_conf + diff --git a/overlay/Debian/usr/local/sbin/debian_cache_to_archives.bash b/overlay/Debian/usr/local/sbin/debian_cache_to_archives.bash new file mode 100755 index 0000000..2f99c75 --- /dev/null +++ b/overlay/Debian/usr/local/sbin/debian_cache_to_archives.bash @@ -0,0 +1,26 @@ +#!/bin/sh +# -*-mode: sh; tab-width: 8; coding: utf-8-unix -*- +# filter + +ROLE=base + +[ -z "$CACHE" ] && CACHE=/mnt/o/Cache/Apt/Debian/10.6 +[ -d "$CACHE" ] || exit 1$? + +[ -d /etc/apt ] || exit 0 + +cd $CACHE || exit 2 + +[ -d var/cache/apt/archives ] || mkdir -p var/cache/apt/archives + +find *.deb -type f -name \*.deb | while read file; do + base=$( basename $file ) + [ ! -d /var/cache/apt/archives/ ] || \ + [ -e /var/cache/apt/archives/$base ] || ln -s $PWD/$file /var/cache/apt/archives/$base + [ -f var/cache/apt/archives/$base -a ! 
-h var/cache/apt/archives/$base ] && rm var/cache/apt/archives/$base + [ -e var/cache/apt/archives/$base ] || ln -s $PWD/$file var/cache/apt/archives/$base + done + + +exit 0 + diff --git a/overlay/Debian/usr/local/sbin/debian_uris_to_urls.bash b/overlay/Debian/usr/local/sbin/debian_uris_to_urls.bash new file mode 100755 index 0000000..ebf5028 --- /dev/null +++ b/overlay/Debian/usr/local/sbin/debian_uris_to_urls.bash @@ -0,0 +1,31 @@ +#!/bin/sh +# -*-mode: sh; tab-width: 8; coding: utf-8-unix -*- +# filter or .uris + +ROLE=base +[ -z "$CACHE" ] && CACHE=/mnt/o/Cache/Apt/Debian/10.6 +[ -d "$CACHE" ] || mkdir $CACHE # || exit 1$? + +# debian --print-uris +if [ $? -eq 0 ] ; then + # filter +grep 'https*://' | \ + sed -e 's@ftp://[^ ]*@@g' -e 's@.*https*://@https://@g' -e "s@'.*@@g" | \ + while read line ; do + for url in $line ; do + base=`basename "$url"` + pre=`sed -e "s@https*://@${CACHE}@" <<< $url` + [ -e $pre ] && break + echo $line + break + done + done + fi +for elt in "$@" ; do + base=$( basename $elt .elts ) + [ -s $base.urls ] && continue + sh $0 < $elt > $base.urls + [ -s $base.urls ] || rm $base.urls + done + +exit 0 diff --git a/overlay/Debian/usr/local/sbin/parrot_uris_to_urls.bash b/overlay/Debian/usr/local/sbin/parrot_uris_to_urls.bash new file mode 100755 index 0000000..37f24ed --- /dev/null +++ b/overlay/Debian/usr/local/sbin/parrot_uris_to_urls.bash @@ -0,0 +1,31 @@ +#!/bin/sh +# -*-mode: sh; tab-width: 8; coding: utf-8-unix -*- +# filter or .uris +[ -z "$CACHE" ] && CACHE=/mnt/o/Cache/Apt/Parrot/Rolling +[ -d "$CACHE" ] || mkdir $CACHE # || exit 1$? + +# debian --print-uris +if [ $? -eq 0 ] ; then + # filter +grep 'https*://' | \ + sed -e 's@ftp://[^ ]*@@g' \ + -e 's@.*https*://@http://@g' -e "s@'.*@@g" \ + -e 's@mirror.parrot.sh/mirrors/parrot@mirrors.aliyun.com/parrot@' | \ + while read line ; do + for url in $line ; do + base=`basename "$url"` + pre=`sed -e "s@https*://@${CACHE}@" <<< $url` + [ -e $pre ] && break + echo $line + break + done + done + fi +for elt in "$@" ; do + base=$( basename $elt .elts ) + [ -s $base.urls ] && continue + sh $0 < $elt > $base.urls + [ -s $base.urls ] || rm $base.urls + done + +exit 0 diff --git a/overlay/Devuan/usr/bin/dev_bootstrap.bash b/overlay/Devuan/usr/bin/dev_bootstrap.bash new file mode 100644 index 0000000..ca6b039 --- /dev/null +++ b/overlay/Devuan/usr/bin/dev_bootstrap.bash @@ -0,0 +1,11 @@ +#!/bin/bash +# -*-mode: sh; tab-width: 8; coding: utf-8-unix -*- + +PREFIX=/usr/local + +ROLE=base + +. /usr/local/etc/local.d/local.bash || exit 1 + + +/usr/local/etc/local.d/Whonix-Gateway.rc || exit 2$? diff --git a/overlay/Devuan/usr/bin/dev_bootstrap_gateway.bash b/overlay/Devuan/usr/bin/dev_bootstrap_gateway.bash new file mode 100644 index 0000000..42e4eab --- /dev/null +++ b/overlay/Devuan/usr/bin/dev_bootstrap_gateway.bash @@ -0,0 +1,13 @@ +#!/bin/bash +# -*-mode: sh; tab-width: 8; coding: utf-8-unix -*- + +PREFIX=/usr/local + +ROLE=base + +. /usr/local/etc/local.d/local.bash || exit 1 + +local_guest || exit 3$? + +#? local_guest_add_xorg_conf + diff --git a/overlay/Devuan/usr/local/bin/de_bootstrap_gateway.bash b/overlay/Devuan/usr/local/bin/de_bootstrap_gateway.bash new file mode 100755 index 0000000..fe6efc7 --- /dev/null +++ b/overlay/Devuan/usr/local/bin/de_bootstrap_gateway.bash @@ -0,0 +1,15 @@ +#!/bin/bash +# -*-mode: sh; tab-width: 8; coding: utf-8-unix -*- + +PREFIX=/usr/local + +ROLE=base + +local_guest_fstab_mnt + +. /usr/local/etc/local.d/local.bash || exit 1 + +local_guest || exit 3$? + +#? 
local_guest_add_xorg_conf + diff --git a/overlay/Devuan/usr/local/bin/devuan_curl_urls.sh b/overlay/Devuan/usr/local/bin/devuan_curl_urls.sh new file mode 100644 index 0000000..7859ac9 --- /dev/null +++ b/overlay/Devuan/usr/local/bin/devuan_curl_urls.sh @@ -0,0 +1,26 @@ +#!/bin/sh +# filter - arguments are to wget - quoted? + +ROOTDIR=/mnt/i +ROLE=base + +LARGS="-X -P /o/Cache/Apt/Devuan/4" +RARGS="--retry 1" + +grep ^http | \ + sed -e 's@http://@https://@g' \ + | \ + sed -e 's@ftp://[^ ]*@@' \ + -e 's@^https://distfiles.gentoo.org/distfiles/[^ ]* https://pypi.python.org/@https://pypi.python.org/@' \ + -e 's@https*://distfiles.gentoo.org@https://gentoo.osuosl.org@g' \ + -e 's@https*://gentoo.osuosl.org@https://mirror.leaseweb.com/gentoo@g' \ + -e 's@https*://download.sourceforge.net@https://download.sourceforge.net@g' | \ + while read urls ; do + url=`echo $urls|sed -e 's@ .*@@'` + base=`basename "$url"` + [ -e /usr/portage/distfiles/$base ] && echo distfiles/$base && continue + for url in $urls ; do + /usr/local/bin/scurl.bash $LARGS -- $RARGS $url || continue + break + done + done diff --git a/overlay/Devuan/usr/local/bin/devuan_uris_to_urls.bash b/overlay/Devuan/usr/local/bin/devuan_uris_to_urls.bash new file mode 100755 index 0000000..37091d1 --- /dev/null +++ b/overlay/Devuan/usr/local/bin/devuan_uris_to_urls.bash @@ -0,0 +1,42 @@ +#!/bin/sh +# -*- mode: sh; fill-column: 75; tab-width: 8; coding: utf-8-unix -*- + +ROLE=base + +. /usr/local/bin/usr_local_tput.bash || exit 2 + +[ $# -eq 0 ] && USAGE $0 url-files... + +cnt=beowulf-e22532c6f83a + +cd /mnt/o/Cache/Apt || exit 3 + +cat "$@" | debian_uris_to_urls.bash \ + | grep deb$ \ + | while read url ; do + root=`echo $url | sed -e 's@.*DEBIAN@@'` + dir=`dirname $root` + [ -f Debian/10.6/deb.debian.org/debian/pool/$root ] || { + new=`echo $url | sed -e 's@deb.devuan.org/@mirrors.dotsrc.org/devuan/@'` + newf=`echo $new | sed -e 's@^http*:/@Devuan/3@'` + [ -f $newf ] || echo $new + continue + } + [ -h Devuan/3/deb.devuan.org/merged/pool/DEBIAN/$dir ] && \ + rm Devuan/3/deb.devuan.org/merged/pool/DEBIAN/$dir + [ -d Devuan/3/deb.devuan.org/merged/pool/DEBIAN/$dir ] || \ + mkdir -p Devuan/3/deb.devuan.org/merged/pool/DEBIAN/$dir + [ -f Devuan/3/deb.devuan.org/merged/pool/DEBIAN/$root ] && \ + rm Devuan/3/deb.devuan.org/merged/pool/DEBIAN/$root + [ -h Devuan/3/deb.devuan.org/merged/pool/DEBIAN/$root ] && continue + ln -s $PWD/Debian/10.6/deb.debian.org/debian/pool/$root \ + Devuan/3/deb.devuan.org/merged/pool/DEBIAN/$root + done + +TODIR=/var/lib/lxd/storage-pools/default/containers/$cnt/rootfs +TODIR=$TODIR/var/cache/apt/archives/ +find Devuan/ -name \*.deb -type l|while read file ; do \ + base=`basename $file` + [ -e $TODIR/$base ] && continue + ln -s $PWD/$file $TODIR + done diff --git a/overlay/Devuan/usr/local/sbin/de_unmarkauto.bash b/overlay/Devuan/usr/local/sbin/de_unmarkauto.bash new file mode 100755 index 0000000..8c32c4a --- /dev/null +++ b/overlay/Devuan/usr/local/sbin/de_unmarkauto.bash @@ -0,0 +1,11 @@ +#!/bin/sh + +ROLE=base + +[ $# -eq 0 ] && exit 1 + +apt-get remove $* $base.urls + [ -s $base.urls ] || rm $base.urls + done + +exit 0 diff --git a/overlay/Gentoo/usr/local/bin/base_gnupg_test.bash b/overlay/Gentoo/usr/local/bin/base_gnupg_test.bash new file mode 100755 index 0000000..4f310c4 --- /dev/null +++ b/overlay/Gentoo/usr/local/bin/base_gnupg_test.bash @@ -0,0 +1,58 @@ +#!/bin/bash +# -*- mode: sh; fill-column: 75; tab-width: 8; coding: utf-8-unix -*- + +prog=$( basename $0 .bash ) +PREFIX=/usr/local +ROLE=base +. 
/usr/local/bin/usr_local_tput.bash || exit 1
+
+# The idea here is to run ansible_local.bash --tags daily
+# and then use this to do the parsing and throwing errors based on the output.
+# This way the ansible run can stay free of errors and this can be
+# run repeatedly anytime outside of ansible to deal with the issues raised.
+# It is also run at the end of ansible_local.bash --tags daily to raise the issues.
+
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+[ -f /usr/local/etc/testforge/testforge.bash ] && . /usr/local/etc/testforge/testforge.bash
+
+. /usr/local/etc/local.d/local.bash
+
+MYID=$( id -u )
+[ $MYID -eq 0 ] || { ERROR $prog must be run as root $MYID ; exit 1 ; }
+LOG_DIR=/usr/local/tmp
+
+[ -d /etc/portage/gnupg ] || exit 0
+
+KEY_ID=96D8BF6D
+KEY_SER=subkeys.pgp.net
+GPG="gpg --home /etc/portage/gnupg"
+
+# * - primary key: DCD05B71EAB94199527F44ACDB6B8C1F96D8BF6D
+# * - subkey: E1D6ABB63BFCFB4BA02FDF1CEC590EEAC9189250
+
+if route | grep ^def ; then
+    [ -f /usr/local/bin/base_daily.gpg ] || \
+        /usr/local/bin/scurl.bash --output /usr/local/bin/base_daily.gpg \
+        https://qa-reports.gentoo.org/output/service-keys.gpg || \
+        exit 2$?
+fi
+
+if [ -f /usr/local/bin/base_daily.gpg ] ; then
+    : trusted
+    # expired keys pass this
+    if $GPG --list-keys --with-colons |grep $KEY_ID |grep :f: ; then
+        : full trust?
+    elif $GPG --list-keys |grep -A1 $KEY_ID |tail -1| grep unknown ; then
+        expect /usr/local/bin/base_daily.exp || exit 4$?
+    elif $GPG --list-keys |grep -A1 $KEY_ID |tail -1| grep trust ; then
+        : this format does not exist
+    else
+        PANIC 3 $GPG --list-keys $KEY_ID not trusted
+    fi
+else
+    gpg --homedir /etc/portage/gnupg --keyserver $KEY_SER --recv-keys 0x$KEY_ID
+    expect /usr/local/bin/base_daily.exp || exit 5$?
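+    # base_daily.exp is not shipped in this commit; judging from the
+    # commented-out line below it presumably scripts the interactive
+    # "gpg --edit-key ... trust" dialogue so the received key ends up trusted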
+ # was gpg --homedir /etc/portage/gnupg --edit-key 0x$KEY_ID trust || exit 6 +fi + +ls -ld /etc/portage/gnupg | grep -q drwx------ || chmod 700 /etc/portage/gnupg diff --git a/overlay/Gentoo/usr/local/etc/python-exec/python-exec.lis b/overlay/Gentoo/usr/local/etc/python-exec/python-exec.lis new file mode 100644 index 0000000..13b5c85 --- /dev/null +++ b/overlay/Gentoo/usr/local/etc/python-exec/python-exec.lis @@ -0,0 +1,336 @@ +#/usr/bin +2to3 +UTscapy +activate-global-python-argcomplete +airdrop-ng +airgraph-ng +airodump-join +alembic +apirst2html.py +automat-visualize +autopep8 +bandit +bandit-baseline +bandit-config-generator +behave +binwalk +bleachbit +bokeh +buildhtml.py +buildout +bumpversion +bzr +calc-prorate +catalyst +cftp +change_tz +chardetect +cherryd +ckeygen +conch +coverage +coverage-2.7 +coverage-3.6 +coverage2 +coverage3 +cpuinfo +csscapture +csscombine +cssparse +csv2rdf +cxfreeze +cxfreeze-quickstart +cygdb +cython +cythonize +depend-java-query +distro +docker-compose +doesitcache +dropbox-cli +easy_install +echo_supervisord_conf +eclean +eclean-dist +eclean-pkg +edfviewer +ekeyword +elementsinfo +enalyze +epkginfo +epsg_tr.py +epydoc +epylint +equery +eshowkw +esri2wkt.py +f2py +fab +fast-import-filter +fast-import-info +fast-import-query +fbless +flake8 +flask +futurize +gcps2vec.py +gcps2wld.py +gdal2tiles.py +gdal2xyz.py +gdal_auth.py +gdal_calc.py +gdal_edit.py +gdal_fillnodata.py +gdal_merge.py +gdal_pansharpen.py +gdal_polygonize.py +gdal_proximity.py +gdal_retile.py +gdal_sieve.py +gdalchksum.py +gdalcompare.py +gdalident.py +gdalimport.py +gdalmove.py +gemato +gertty +gflags2man.py +gjl +glances +gsettings-schema-convert +helpviewer-2.8 +helpviewer-3.0 +http +ics_diff +idle +img2png-2.8 +img2png-3.0 +img2py-2.8 +img2py-3.0 +img2xpm-2.8 +img2xpm-3.0 +imlate +invoke +iotop +iptest +iptest2 +iptest3 +ipython +ipython2 +ipython3 +isort +isympy +java-config-2 +jsonpointer +jsonschema +jupyter +jupyter-bundlerextension +jupyter-kernel +jupyter-kernelspec +jupyter-migrate +jupyter-nbconvert +jupyter-nbextension +jupyter-notebook +jupyter-qtconsole +jupyter-run +jupyter-serverextension +jupyter-trust +kernprof +keyczart +kivy-garden +layman +layman-mounter +layman-overlay-maker +layman-updater +livereload +m2r +mailmail +mako-render +margins +markdown2 +markdown_py +mca2edf +meson +mibcopy.py +mibdump.py +miniterm.py +mkbootimg +mkdocs +mkgraticule.py +ndg_httpclient +nosetests +nyx +odo +ogrmerge.py +paster +pasteurize +paver +pbr +pct2rgb.py +pdfshuffler +pdoc +peakidentifier +pep8 +pidproxy +pt2to3 +ptdump +ptrepack +pttree +pudb +pudb3 +pwiz.py +pxml +py.test +pyalacarte-2.8 +pyalacarte-3.0 +pyalamode-2.8 +pyalamode-3.0 +pybabel +pybitmessage +pybot +pycallgraph +pycodestyle +pycrust-2.8 +pycrust-3.0 +pydoc +pyflakes +pygmentize +pygobject-codegen-2.0 +pyhtmlizer +pyjwt +pylint +pylupdate5 +pymca +pymcabatch +pymcapostbatch +pymcaroitool +pyrcc5 +pyreverse +pyro4-check-config +pyro4-flameserver +pyro4-httpgateway +pyro4-ns +pyro4-nsc +pyro4-test-echoserver +pyroma +pyrsa-decrypt +pyrsa-decrypt-bigfile +pyrsa-encrypt +pyrsa-encrypt-bigfile +pyrsa-keygen +pyrsa-priv2pub +pyrsa-sign +pyrsa-verify +pyshell-2.8 +pyshell-3.0 +pytest +python-argcomplete-check-easy-install-script +python-config +python2-config +python3-config +pyuic5 +pyvenv +pywrap-2.8 +pywrap-3.0 +pywxrc-2.8 +pywxrc-3.0 +qemu-ga-client +qmp-shell +qr +quicktest.py +qutebrowser +raven +rdf2dot +rdfgraphisomorphism +rdfpipe +rdfs2dot +readelf.py +rebot +register-python-argcomplete +repo +repoman 
+revdep-rebuild +rgb2pct.py +rgbcorrelator +robot +rpyc_classic.py +rpyc_registry.py +rst2html.py +rst2html4.py +rst2html5.py +rst2latex.py +rst2man.py +rst2odt.py +rst2odt_prepstyles.py +rst2pdf +rst2pseudoxml.py +rst2s5.py +rst2xetex.py +rst2xml.py +rstpep2html.py +runxlrd.py +safety +scapy +scons +scons-configure-cache +scons-time +sconsign +scour +sphinx-apidoc +sphinx-autogen +sphinx-build +sphinx-quickstart +spyder3 +supervisorctl +supervisord +svg2rlg +symilar +tabulate +tkconch +tls.py +tlsdb.py +tor-prompt +tox +tox-quickstart +trial +twist +twistd +twisted-regen-cache +umpv +uniconvertor +unit2 +vba_extract.py +versioneer +veusz +virtualenv +vmxcap +vol.py +waitress-serve +watchmedo +wheel +wsdump.py +xml2po +xpra +xpra_browser +xpra_launcher +xpra_signal_listener +xrced-2.8 +xrced-3.0 +yamllint +yolk +yq +zope-testrunner +# /usr/sbin +archive-conf +dispatch-conf +env-update +fixpackages +regenworld +smbios-keyboard-ctl +smbios-lcd-brightness +smbios-passwd +smbios-sys-info +smbios-thermal-ctl +smbios-token-ctl +smbios-wakeup-ctl +smbios-wireless-ctl diff --git a/overlay/Gentoo/usr/local/etc/python-exec/python2-base.lis b/overlay/Gentoo/usr/local/etc/python-exec/python2-base.lis new file mode 100644 index 0000000..e69de29 diff --git a/overlay/Gentoo/usr/local/etc/python-exec/python3-base.lis b/overlay/Gentoo/usr/local/etc/python-exec/python3-base.lis new file mode 100644 index 0000000..212ed60 --- /dev/null +++ b/overlay/Gentoo/usr/local/etc/python-exec/python3-base.lis @@ -0,0 +1,19 @@ +ebuild +egencache +emerge +emirrordist +glsa-check +portageq +quickpkg +emaint +ansible +ansible-config +ansible-connection +ansible-console +ansible-doc +ansible-galaxy +ansible-inventory +ansible-playbook +ansible-playbook.bash +ansible-pull +ansible-vault diff --git a/overlay/Gentoo/usr/local/sbin/base_diff_from_dst.bash b/overlay/Gentoo/usr/local/sbin/base_diff_from_dst.bash new file mode 100755 index 0000000..d5d0cb8 --- /dev/null +++ b/overlay/Gentoo/usr/local/sbin/base_diff_from_dst.bash @@ -0,0 +1,30 @@ +#!/bin/bash +# -*- mode: sh; fill-column: 75; tab-width: 8; coding: utf-8-unix -*- + +. /usr/local/bin/usr_local_base.bash || exit 1 +. /usr/local/bin/usr_local_tput.bash || exit 2 + +TODIR=/o/data/TestForge/src/ansible +BASE_DIR=`basename $PWD` + +[ $# -ge 1 ] && ROLE=$1 || ROLE= +if [ -z "$ROLE" ] ; then + base=`basename $PWD` + parent=`dirname $PWD` + file=$parent/$base.bash + if [ -f "$file" ] ; then + ROLE=`grep '^ *ROLE=' $file | sed -e 's/=.*//'` + fi +fi +shift + +if [ $PWD = $PREFIX/src ] ; then + ERROR $prog diffing in $PWD with "$@" + exit 4 +fi +INFO $prog diffing in $PWD with "$@" + +PREFIX=$PREFIX ROLE="$ROLE" \ + /var/local/src/var_local_src.bash ols_testforge_diffs + +exit 0 diff --git a/overlay/Gentoo/usr/local/sbin/box_gentoo_emerge.bash b/overlay/Gentoo/usr/local/sbin/box_gentoo_emerge.bash new file mode 100755 index 0000000..9256c81 --- /dev/null +++ b/overlay/Gentoo/usr/local/sbin/box_gentoo_emerge.bash @@ -0,0 +1,51 @@ +#!/bin/bash +# -*- mode: sh; fill-column: 75; tab-width: 8; coding: utf-8-unix -*- + +prog=$( basename $0 .bash ) +ROLE=base +LOG_DIR=/usr/local/var/logs/portage +[ -d $LOG_DIR ] || mkdir -p $LOG_DIR + +declare -a ARGS +if [ "$#" -eq 1 ] ; then + ARGS=( "$1" ) + LOG=$( basename $1 ).log + elif [ "$#" -eq 0 ] ; then + ARGS="@world" + LOG=world.log + elif false && [ -f world.lib ] ; then # ? 
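+    # note: the "false &&" above keeps this branch disabled; if re-enabled it
+    # would read package atoms from a local world.lib, skipping comment lines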
+ ARGS="$( grep -v '^#' world.lib )" + LOG=world.log + else + ARGS=("$@") + LOG=world.log + fi + +if mount | grep -q ' on /mnt/tmp' ; then + export TMPDIR=/mnt/tmp +# else +# echo "WARN: /mnt/tmp not mounted" + fi + +# --changed-deps --deep --update +LARGS="-vb --changed-use --with-bdeps=y --changed-deps-report" +LARGS="$LARGS --backtrack=30 --ignore-built-slot-operator-deps=y --keep-going" + +# Skips the packages specified on the command-line that have already been installed. +LARGS="$LARGS --noreplace" + +# LARGS="$LARGS --exclude " +LOG=$LOG_DIR/$LOG +export PYTHONPATH= +echo INFO: $LARGS $ARGS >> $LOG 2>&1 +nice python$BASE_PYTHON3_MINOR $( which emerge ) $LARGS $ARGS >> $LOG 2>&1 +[ $? -ne 0 ] && exit $? +if grep ImportError $LOG ; then + echo ERROR: ImportError $ARGS && exit 10 +elif grep ParseError $LOG ; then + echo ERROR: ParseError $ARGS && exit 11 +elif grep 'Your current profile is invalid' $LOG ; then + echo ERROR: Your current profile is invalid $ARGS && exit 12 +fi + +exit 0 diff --git a/overlay/Gentoo/usr/local/sbin/gentoo_base_verify-sig.bash b/overlay/Gentoo/usr/local/sbin/gentoo_base_verify-sig.bash new file mode 100755 index 0000000..22b38c8 --- /dev/null +++ b/overlay/Gentoo/usr/local/sbin/gentoo_base_verify-sig.bash @@ -0,0 +1,168 @@ +#!/bin/sh +# -*- mode: sh; tab-width: 8; encoding: utf-8-unix -*- + +prog=$( basename $0 .bash ) +ROLE=base +. /usr/local/bin/usr_local_tput.bash + +PYVER=3 +PYTHON_MINOR=$( python$PYVER --version 2>&1| sed -e 's@^.* @@' -e 's@\.[0-9]*$@@' ) + +[ -d /etc/portage ] || exit 0 + +usage () { echo "USAGE: $prog [command args] -" $* ; exit 1 ; } +error () { retval=$1 ; shift; ERROR "$prog" $* ; exit $retval ; } +warn () { : ; } +info () { : ; } +debug () { : ; } + +# must be run as root +[ "$( id -u )" -ne "0" ] && error 1 "must be run as root" + +[ -f /etc/portage/package.use/2021-00_verify-sig.txt ] || \ + touch /etc/portage/package.use/2021-00_verify-sig.txt || exit 2 + +equery h -F '$cp:$slot' verify-sig | \ + sed -e 's/:0.*//' | while read b ; do \ + grep -q "^$b " /etc/portage/package.use/2021-00_verify-sig.txt && continue + eix -r "^$b$" | grep -q Installed && \ + echo '#' $b verify-sig>>/etc/portage/package.use/2021-00_verify-sig.txt || \ + echo '##' $b verify-sig>>/etc/portage/package.use/2021-00_verify-sig.txt +done + +[ -f /usr/lib/python$PYTHON_MINOR/site-packages/portage/eclass_cache.py.diff ] || \ + cat > /usr/lib/python$PYTHON_MINOR/site-packages/portage/eclass_cache.py.diff << EOF +*** eclass_cache.py.dst 2021-06-13 21:26:05.000000000 +0000 +--- eclass_cache.py 2021-06-24 10:45:12.422857990 +0000 +*************** +*** 166,175 **** +--- 166,176 ---- + return d + + def get_eclass_data(self, inherits): + ec_dict = {} + for x in inherits: ++ if x not in self.eclasses: continue + ec_dict[x] = self.eclasses[x] + + return ec_dict + + @property +EOF +[ -f /usr/lib/python$PYTHON_MINOR/site-packages/portage/eclass_cache.py.dst ] || \ + patch -b -z .dst /usr/lib/python$PYTHON_MINOR/site-packages/portage/eclass_cache.py \ + < /usr/lib/python$PYTHON_MINOR/site-packages/portage/eclass_cache.py.diff + +[ -f /usr/portage/eclass/verify-sig.eclass.diff ] || \ + cat > /usr/portage/eclass/verify-sig.eclass.diff << EOF +*** /usr/portage/eclass/verify-sig.eclass.dst 2021-07-29 06:09:55.000000000 +0000 +--- /usr/portage/eclass/verify-sig.eclass 2021-08-18 19:13:29.502980940 +0000 +*************** +*** 86,95 **** +--- 86,99 ---- + [[ -n ${key} ]] || + die "${FUNCNAME}: no key passed and VERIFY_SIG_OPENPGP_KEY_PATH unset" + + local extra_args=() + [[ 
${VERIFY_SIG_OPENPGP_KEY_REFRESH} == yes ]] || extra_args+=( -R ) ++ # gemato -R, --no-refresh-keys ++ # Disable refreshing OpenPGP key (prevents network ++ # access, applicable when using -K only) ++ [ -z "$http_proxy" ] || extra_args+=( --proxy $http_proxy ) + [[ -n ${VERIFY_SIG_OPENPGP_KEYSERVER+1} ]] && extra_args+=( + --keyserver "${VERIFY_SIG_OPENPGP_KEYSERVER}" + ) + + # GPG upstream knows better than to follow the spec, so we can't +*************** +*** 98,110 **** + addpredict /run/user + + local filename=${file##*/} + [[ ${file} == - ]] && filename='(stdin)' + einfo "Verifying ${filename} ..." +! gemato gpg-wrap -K "${key}" "${extra_args[@]}" -- \ +! gpg --verify "${sig}" "${file}" || +! die "PGP signature verification failed" + } + + # @FUNCTION: verify-sig_verify_message + # @USAGE: [] + # @DESCRIPTION: +--- 102,121 ---- + addpredict /run/user + + local filename=${file##*/} + [[ ${file} == - ]] && filename='(stdin)' + einfo "Verifying ${filename} ..." +! einfo gemato gpg-wrap -K "${key}" "${extra_args[@]}" -- \ +! gpg --verify --disable-dirmngr \ +! "${sig}" "${file}" +! # --keyserver-options http-proxy=http://localhost:3128 +! einfo `env |sort` +! # env - is necessary andx sufficient +! env - gemato gpg-wrap -K "${key}" "${extra_args[@]}" -- \ +! gpg --verify --disable-dirmngr \ +! "${sig}" "${file}" || \ +! die "PGP signature verification failed" + } + + # @FUNCTION: verify-sig_verify_message + # @USAGE: [] + # @DESCRIPTION: +*************** +*** 122,131 **** +--- 133,143 ---- + [[ -n ${key} ]] || + die "${FUNCNAME}: no key passed and VERIFY_SIG_OPENPGP_KEY_PATH unset" + + local extra_args=() + [[ ${VERIFY_SIG_OPENPGP_KEY_REFRESH} == yes ]] || extra_args+=( -R ) ++ [ -z "$http_proxy" ] || extra_args+=( --proxy $http_proxy ) + [[ -n ${VERIFY_SIG_OPENPGP_KEYSERVER+1} ]] && extra_args+=( + --keyserver "${VERIFY_SIG_OPENPGP_KEYSERVER}" + ) + + # GPG upstream knows better than to follow the spec, so we can't +*************** +*** 134,146 **** + addpredict /run/user + + local filename=${file##*/} + [[ ${file} == - ]] && filename='(stdin)' + einfo "Verifying ${filename} ..." +! gemato gpg-wrap -K "${key}" "${extra_args[@]}" -- \ +! gpg --verify --output="${output_file}" "${file}" || +! die "PGP signature verification failed" + } + + # @FUNCTION: verify-sig_verify_signed_checksums + # @USAGE: [] + # @DESCRIPTION: +--- 146,165 ---- + addpredict /run/user + + local filename=${file##*/} + [[ ${file} == - ]] && filename='(stdin)' + einfo "Verifying ${filename} ..." +! einfo gemato gpg-wrap -K "${key}" "${extra_args[@]}" -- \ +! gpg --verify --disable-dirmngr --output="${output_file}" \ +! "${file}" +! # --keyserver-options http-proxy=http://localhost:3128 +! einfo `env |sort` +! # env - is necessary and sufficient +! env - gemato gpg-wrap -K "${key}" "${extra_args[@]}" -- \ +! gpg --verify --disable-dirmngr --output="${output_file}" \ +! "${file}" || \ +! 
die "PGP signature verification failed" + } + + # @FUNCTION: verify-sig_verify_signed_checksums + # @USAGE: [] + # @DESCRIPTION: +EOF + +[ -f /usr/portage/eclass/verify-sig.eclass.dst ] || \ + patch -b -z .dst /usr/portage/eclass/verify-sig.eclass \ + < /usr/portage/eclass/verify-sig.eclass.diff || exit 3 + diff --git a/overlay/Gentoo/usr/local/sbin/gentoo_bootstrap_sudo.bash b/overlay/Gentoo/usr/local/sbin/gentoo_bootstrap_sudo.bash new file mode 100755 index 0000000..8e03668 --- /dev/null +++ b/overlay/Gentoo/usr/local/sbin/gentoo_bootstrap_sudo.bash @@ -0,0 +1,13 @@ +#!/bin/sh +# -*- mode: sh; tab-width: 8; encoding: utf-8-unix -*- +ROLE=base +root=$1 +if [ ! -f $root/usr/bin/sudo ] && [ -d $root/etc/portage/ ] ; then + [ -d $root/usr/portage/distfiles/ ] || \ + mkdir -p $root/usr/portage/distfiles + [ -f $root/usr/portage/distfiles/sudo-$SUDO_VER.tar.gz ] || \ + cp -p /usr/portage/distfiles/sudo-$SUDO_VER.tar.gz $root/usr/portage/distfiles/ + # env ROOT=$root emerge -vbp app-admin/sudo 2>&1| tee -a $root/root/sudo.log + chroot $root emerge -vbp app-admin/sudo 2>&1| tee -a $root/root/sudo.log + fi + diff --git a/overlay/Gentoo/usr/local/sbin/gentoo_check_manifest.bash b/overlay/Gentoo/usr/local/sbin/gentoo_check_manifest.bash new file mode 100755 index 0000000..85ffbf9 --- /dev/null +++ b/overlay/Gentoo/usr/local/sbin/gentoo_check_manifest.bash @@ -0,0 +1,39 @@ +# This is recent - Pentoo not Funtoo +# -*-mode: sh; tab-width: 8; coding: utf-8-unix -*- + +BASE_PORTDIR=/usr/portage + +keyf=/usr/share/openpgp-keys/gentoo-release.asc +[ -f $keyf ] || exit 1 + +gpg2 --disable-dirmngr --list-keys --with-sig-check --keyring $keyf >/tmp/G$$.log # 2>&1 || exit 1 +# gpg: 1 bad signature on Clipos +# gpg: 1 bad signature on Pentoo19 +if grep 'bad signature' /tmp/G$$.log ; then + echo ERROR: bad signature /tmp/G$$.log # exit 2 + # sig-3 BB572E0E2D182910 2009-08-25 Gentoo Linux Release Engineering (Automated Weekly Release Key) + gpg2 --disable-dirmngr --batch --delete-key BB572E0E2D182910 +fi + +# numbers 1-3 for certificate check level (see --ask-cert-level) +grep '^sig-[0-9]' /tmp/G$$.log && { echo WARN: bad signature ; exit 3 ;} +# gpg: 104 signatures not checked due to missing keys + +[ -f $BASE_PORTDIR/Manifest ] || exit 0 +# on stderr! +gpg2 --verify --keyring $keyf $BASE_PORTDIR/Manifest >/tmp/K$$.log 2>&1 || exit 3 +# DCD05B71EAB94199527F44ACDB6B8C1F96D8BF6D +# grep 'This key has expired' /tmp/K$$.log && exit 3 +grep 'using RSA key' /tmp/K$$.log || exit 4 +grep 'Primary key fingerprint:' /tmp/K$$.log | sed -e 's/.*: //' -e 's/ //g' > /tmp/K$$.key || exit 5 + +if route | grep -q ^default ; then + . /root/bin/tor.sh + wget -O /tmp/K$$.html https://www.gentoo.org/downloads/signatures/ || exit 0 + grep "`cat /tmp/K$$.key`" /tmp/K$$.html || { + echo ERROR: failed 'Primary key fingerprint:' "`cat /tmp/K$$.key`" + exit 5 + } + fi + +exit 0 diff --git a/overlay/Gentoo/usr/local/sbin/gentoo_chroot_pentoo.bash b/overlay/Gentoo/usr/local/sbin/gentoo_chroot_pentoo.bash new file mode 100755 index 0000000..dac01d8 --- /dev/null +++ b/overlay/Gentoo/usr/local/sbin/gentoo_chroot_pentoo.bash @@ -0,0 +1,120 @@ +#!/bin/sh +# -*- mode: sh; tab-width: 8; encoding: utf-8-unix -*- + +ROLE=testforge +# export PATH=$PATH:/usr/local/bin +MOUNTS="mnt/i mnt/j mnt/o" + +. 
/usr/local/bin/usr_local_tput.bash +error () { retval=$1 ; shift; ERROR "$0" $* ; exit $retval ; } +info () { INFO " $0 " $* ; } + +# must be run as root +[ "`id -u`" -ne "0" ] && error 1 "must be run as root" + +if [ "$#" -eq "0" ] ; then + error 2 "give an absolute directory name as argument" +fi +LARGS="" +CMD="" +while true; do + case "$1" in + '-'*) + LARGS="$LARGS $1" + shift + ;; + *) + break + ;; + esac +done + +root=$1 +shift +if [ ! -d "$root" ] ; then + error 3 "absolute directory name for chroot not found - $root" + fi + +# unix partition +[ -d $root/lost+found ] || exit 4 +# linux partition +[ -e $root/usr/src/ ] || exit 5 +cd $root || exit 6 + +if [ ! -d boot ] ; then + error 7 "missing boot/" + fi +for file in tmp usr/tmp var/tmp ; do + [ -d $file ] && continue + mkdir $file || error 8 " missing directory $file" + chmod 1777 $file + done +# df /var/tmp | grep -q sd.12 || mount /var/tmp + +for file in proc sys dev dev/pts dev/shm usr ; do + [ -d $file ] && continue + mkdir $file || exit 9 + chmod 755 $file + done + +# Think -R is causing problems +[ -e proc/self ] || mount -t proc none proc || error 10 +# this was rbind +[ -e sys/kernel ] || mount -o bind /sys sys || error 12 +[ -e dev/null ] || mount -o bind /dev dev || error 11 +[ -e /dev/pts/0 ] || \ + mount -t devpts -o rw,relatime,mode=600 devpts dev/pts \ + || error 12 +#? try mount| while read a on what type [ tmpfs cgroup binfmt_misc? ] without rbind +mount| while read what foo on bar type rest ; do + # cgroup devpts devtmpfs ecryptfs ext2 fuseblk proc rpc_pipefs sysfs tmpfs vfat + # leave real disks for $MOUNTS + [ $type = 'fuse' -o $type = 'ext2' -o $type = 'ext4' -o $type = 'vfat' -o ] && continue + # have done these + [ $type = 'proc' -o $type = 'sys' -o $type = 'dev' -o $type = 'devpts' -o ] && continue + DBUG "Dunno $what $on $type" + done +# check for /dev/loop devices - up to 255 on android +[ -e /dev/loop1 ] || \ + ( cd /dev && \ + for i in 0 1 2 3 4 5 6 7 ; do + [ -e loop$i ] && continue + mknod loop$i b 7 $i + chmod 660 loop$i + chgrp disk loop$i + done ) + +for elt in $MOUNTS ; do + [ -d $elt ] || { mkdir $elt ; chmod 755 $elt ; } + grep -q /$elt /proc/mounts || continue + [ -d $elt/tmp ] && continue + grep -q $root/$elt /proc/mounts && continue + mount --bind /$elt $root/$elt + done + +# You'll also want to copy over resolv.conf in order to have proper DNS name +# resolution from inside the chroot: +# but in chroot, you'll need to change this to your connected IP address. +cp -L /etc/resolv.conf etc || exit 16 + +for file in .bashrc .profile .jedrc ; do + [ -f root/$file ] || \ + cp -p /root/$file root/$file + done + + +EARGS="CHROOT=1 LANG=en_US.UTF-8 LC_COLLATE=C" +EELTS="$EELTS TERM DISPLAY HOME USER LOGNAME USERNAME PATH" +EELTS="$EELTS http_proxy https_proxy socks_proxy no_proxy" +for elt in $EELTS ; do + EARGS="$EARGS `env|grep ^${elt}=`" + done + +# was /bin/bash -l +[ "$#" -eq 0 ] && set -- /bin/sh -i + +# Now you can chroot into your new system. Use env before chroot to ensure that no +# environment variables from the installation media are used by your new system: +#? 
PATH=$PATH +echo chroot $LARGS $root /usr/bin/env -i $EARGS "$@" +chroot $LARGS $root /usr/bin/env -i $EARGS "$@" diff --git a/overlay/Gentoo/usr/local/sbin/gentoo_clean_portage_packages.bash b/overlay/Gentoo/usr/local/sbin/gentoo_clean_portage_packages.bash new file mode 100755 index 0000000..4243b43 --- /dev/null +++ b/overlay/Gentoo/usr/local/sbin/gentoo_clean_portage_packages.bash @@ -0,0 +1,45 @@ +#!/bin/bash +# -*- mode: sh; tab-width: 8; encoding: utf-8-unix -*- + +# shopt -s nullglob + +[ -f /etc/conf.d/hostname ] && . /etc/conf.d/hostname + +if [ $# -eq 1 ] ; then + TO=$1 + elif [ -n "$hostname" -a "$hostname" = "pentoo" ] ; then + TO=/mnt/o/Cache/linuxPen19/var/cache/portage/packages + elif [ -f /etc/dracut.conf.d/funtoo.conf ] ; then + TO=/mnt/o/Cache/linuxFun64/var/cache/portage/packages + elif [ -f /etc/dracut.conf.d/clipos.conf ] ; then + TO=/mnt/o/Cache/linuxClipos/var/cache/portage/packages + else + exit 3 +fi + +[ -n "$TO" ] || { echo ERROR: empty TO ; exit 4 ; } +[ -d $TO ] || { echo ERROR: mkdir -p $TO ; exit 5 ; } + + pushd $TO + # -o -name \*.xpak + find *-* virtual pentoo -type f -name \*tbz2 | grep -v '_sources\|_download_' | while read file ; do + dir=`dirname $file` + base=`basename $file` + subdir=`sed -e 's/-[0-9].*//' <<< $base` + xdir=$dir/$subdir + [ -d $xdir ] || continue + xpak=`sed -e 's/.tbz2/-?.xpak/' <<< $base` + ls $file $xdir/$xpak 2>/dev/null && rm -f $xdir/$xpak + done + popd + + cd $OUT + rm -rf sys-firmware/intel-microcode* \ + sys-kernel/linux-firmware/* qpkg.* + + find -L *-*/ -type l -delete 2>&1| \ + grep 'Too many levels'| \ + sed -e 's/find: .//' -e 's/xpak.:.*/xpak/'| \ + xargs rm -f + + exit 0 diff --git a/overlay/Gentoo/usr/local/sbin/gentoo_eix-test-obsolete.bash b/overlay/Gentoo/usr/local/sbin/gentoo_eix-test-obsolete.bash new file mode 100755 index 0000000..d584724 --- /dev/null +++ b/overlay/Gentoo/usr/local/sbin/gentoo_eix-test-obsolete.bash @@ -0,0 +1,8 @@ +#!/bin/sh +# -*- mode: sh; fill-column: 75; tab-width: 8; coding: utf-8-unix -*- +# https://forums.gentoo.org/viewtopic-t-1044220-start-0.html + +ROLE=base + +eix-test-obsolete -H brief | grep -v -e "^No " -e "^Skipping " -e "^$" +exit 0 diff --git a/overlay/Gentoo/usr/local/sbin/gentoo_etc_portage.bash b/overlay/Gentoo/usr/local/sbin/gentoo_etc_portage.bash new file mode 100755 index 0000000..334bdff --- /dev/null +++ b/overlay/Gentoo/usr/local/sbin/gentoo_etc_portage.bash @@ -0,0 +1,109 @@ +#!/bin/sh +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- + +prog=$( basename $0 .bash ) +PREFIX=/var/local +ROLE=base +. /usr/local/etc/testforge/testforge.bash || exit 1 +. /usr/local/bin/usr_local_base.bash || exit 2 +USAGE="$0 role-directory" +#echo 1 +[ $# -ge 1 ] || { ERROR $USAGE ; exit 3 ; } + +[ -n "$TESTFORGE_ANSIBLE_SRC" ] || TESTFORGE_ANSIBLE_SRC=/g/TestForge/src/ansible +base=`cat /etc/hostname` +json="$TESTFORGE_ANSIBLE_SRC"/tmp/Hosts/$base.json +if [ -d "$TESTFORGE_ANSIBLE_SRC" ] ; then + if [ ! -s $json ] || [ "$TESTFORGE_ANSIBLE_SRC"/hosts.yml -nt $json ] ; then + /usr/local/sbin/gentoo_hosts_json.bash || exit 4$? + fi +fi + +BOX_OS_FLAVOR=`/usr/local/sbin/gentoo_hosts_json.bash` || exit 4$? 
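+# BOX_OS_FLAVOR (echoed by gentoo_hosts_json.bash) selects the
+# tasks/Gentoo/$BOX_OS_FLAVOR subdirectory that the loop below writes into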
+ +# "BOX_PYTHON3_MINOR": "3.10" +PYTHON3_VER=`grep BOX_PYTHON3_MINOR <$json|sed -e 's/.*\.//' -e 's/,//' -e 's/"//'` + +TASKS="Gentoo/$BOX_OS_FLAVOR" +YAML_HEAD="# -*- mode: yaml; tab-width: 0; coding: utf-8-unix -*- +# This is an automatically generated file: do not edit + +--- + +""" +TEXT_HEAD="# -*- mode: text; tab-width: 8; coding: utf-8-unix -*- + +""" + +for dir in $* ; do + [ -d $dir ] || { ERROR $dir ; exit 5 ; } + [ -d $dir/vars ] || { WARN not a roles directory $dir/vars ; } + YAML_BODY="- name: \"\\1\"\n blockinfile:\n dest: \\1\n create: true\n marker: \"# {mark} Ansible Managed Block $dir \\2\"\n block: |" + for port in use mask unmask license accept_keywords ; do + # DBUG $dir $port + cp /dev/null $dir/vars/$port.txt + grep '^ *- .*/' $dir/vars/Gentoo2.yml \ + | sed -e 's@^ *- @@' -e 's@ .*@@' \ + | uniq \ + | while read elt ; do + base=`basename $elt` + # override with $dir/tasks/$TASKS/portage.yml + file=$dir/tasks/$TASKS/portage.yml + [ -f $file ] && \ + grep -Hq "^ *[=>]*$elt" $file && \ + DBUG $port - "^ *[=>]*$elt" is already in $file && \ + continue + file=$dir/vars/$port.txt + [ -f $file ] && \ + grep -Hq "^ *[=>]*$elt" $file && \ + DBUG "^ *[=>]*$elt" is already in $file + # && continue + grep "^[=<>]*$elt" /etc/portage/package.$port/2*txt \ + | sed -e 's@^@# @' -e 's@.txt:@.txt '$base'\n@' \ + | sed -e 's@^\([^#][^ ]*\)@ \1%@' -e 's@^#@\n#@' \ + >> $dir/vars/$port.txt || exit 6 + #? grep "^#.*required by $elt" /etc/portage/package.$port/2*txt + + done +#echo 2 + if [ ! -s $dir/vars/$port.txt ] ; then + rm -f $dir/vars/$port.txt $dir/tasks/$TASKS/$port.yml + continue + fi + + INFO $port lines `wc -l $dir/vars/$port.txt` + [ -d $dir/tasks/$TASKS ] || { ERROR no dir $dir/tasks/$TASKS ; continue ; } +#echo 3 + to_yaml=$dir/tasks/$TASKS/$port.yml.new + # overwrite if its there as .new + echo "$YAML_HEAD" > $to_yaml + # sed -e "s/\"\n block: |/ $base\0/" + sed -e 's@^# \([^ ]*\) \([^ /]*\)$@'"$YAML_BODY"'@' $dir/vars/$port.txt \ + >> $to_yaml || exit 7 + # | sed -e "s/BLOCK $dir/BLOCK $dir $base/" + if [ "$port" = "use" -o "$port" = "license" ] ; then + sed -e 's@^ [=<>][=<>]*\([^:<>=]*\)-[0-9][-0-9.]*%@ \1@' -i $to_yaml || exit 8 + sed -e 's@^ [>=][>=]*\([^%]*\)%@ \1@' -i $to_yaml || exit 9 + elif [ "$port" = "mask" -o "$port" = "unmask" -o "$port" = "accept_keywords" ] ; then + sed -e 's@^ \([^:]*\)/\([^%]*\)%@ \1/\2@' -i $to_yaml || exit 10 + fi + sed -e 's@^ \([^:\n]*\)/\([^%\n]*\)%@ \1/\2@' -i $to_yaml || exit 11 + echo >> $to_yaml +#echo 5 + grep % $to_yaml && ERROR "% in $to_yaml" && exit 12 + grep -q "^- [importinclude]*_tasks:.*$port.yml" $dir/tasks/$TASKS/main.yml || { \ + echo "- include_tasks: $TASKS/$port.yml" >> $dir/tasks/$TASKS/main.yml + WARN "ADDED $port.yml to $dir/tasks/$TASKS/main.yml" + } + if [ ! 
-f $dir/tasks/$TASKS/$port.yml ] ; then + mv $to_yaml $dir/tasks/$TASKS/$port.yml + INFO created $dir/tasks/$TASKS/$port.yml + elif diff -qw $to_yaml $dir/tasks/$TASKS/$port.yml ; then + rm -f $to_yaml + # DBUG unchanged $to_yaml + else + : diff is verbose + fi + done | sed -e 's/ and / /' -e 's/ differ$//' -e 's/^Files/mv/' +done +exit 0 diff --git a/overlay/Gentoo/usr/local/sbin/gentoo_funtoo_world.bash b/overlay/Gentoo/usr/local/sbin/gentoo_funtoo_world.bash new file mode 100755 index 0000000..84ff9ec --- /dev/null +++ b/overlay/Gentoo/usr/local/sbin/gentoo_funtoo_world.bash @@ -0,0 +1,119 @@ +#!/bin/sh +# -*- mode: sh; fill-column: 75; tab-width: 8; coding: utf-8-unix -*- + +if [ "$#" -ne 0 ] ; then + ARGS=$* + LARGS="" + else + ARGS="@world" + # --changed-deps --deep + LARGS="-vb --update" + fi + + +LARGS="$LARGS --changed-use --with-bdeps=y --changed-deps-report -k" +LARGS="$LARGS --backtrack=30 --ignore-built-slot-operator-deps=y --keep-going" + +# Skips the packages specified on the command-line that have already been installed. +LARGS="$LARGS --noreplace" + +# dangerous +LARGS="$LARGS --exclude baselayout" + +# gone +LARGS="$LARGS --exclude sci-chemistry/PyMca" +LARGS="$LARGS --exclude dev-python/PyQt4" +LARGS="$LARGS --exclude dev-python/pyqwt" + +# I wont do this routinely +#?LARGS="$LARGS --exclude app-office/libreoffice" +#?LARGS="$LARGS --exclude app-office/libreoffice-bin" + +# /usr/local +LARGS="$LARGS --exclude seamonkey" +LARGS="$LARGS --exclude firefox" +LARGS="$LARGS --exclude thunderbird" + +LARGS="$LARGS --exclude gentoo-sources" + +#? LARGS="$LARGS --exclude net-analyzer/openvas*" +#LARGS="$LARGS --exclude " + +#LARGS="$LARGS --exclude sys-libs/zlib" +LARGS="$LARGS --exclude dev-util/android-sdk-update-manager" +LARGS="$LARGS --exclude dev-util/android-ndk" + +LARGS="$LARGS --exclude sys-kernel/clipos-kernel" + +# always problems with icu +LARGS="$LARGS --exclude dev-libs/icu" + +#emerge: there are no ebuilds to satisfy ">=dev-libs/nsgenbind-0.7". +LARGS="$LARGS --exclude www-client/netsurf" +# emerge: there are no ebuilds to satisfy ">=dev-python/attrs-19.1.0[python_targets_python3_6(-)?,python_targets_python3_7(-)?,-python_single_target_python3_6(-),-python_single_target_python3_7(-)]". 
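+# (the emerge errors quoted in these comments record why each package below is
+#  excluded; presumably an exclude can be dropped once the named package builds again)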
+#LARGS="$LARGS --exclude www-client/qutebrowser" + +LARGS="$LARGS --exclude dev-ruby/builder:3.1" + +# required by dev-python/pyqtgraph-0.10.0-r1::python-modules-kit +# required by @selected +# required by @world (argument) +# >=dev-python/PyQt5-5.10.1-r1 -opengl +LARGS="$LARGS --exclude dev-python/pyqtgraph" +LARGS="$LARGS --exclude net-print/hplip" + +LARGS="$LARGS --exclude pentoo/pentoo-scanner" +#LARGS="$LARGS --exclude net-analyzer/wpscan" +#LARGS="$LARGS --exclude dev-ruby/cms_scanner" + +LARGS="$LARGS --exclude net-wireless/gnuradio" # [python_single_target_python2_7,qt5] +LARGS="$LARGS --exclude net-wireless/gr-iio" +LARGS="$LARGS --exclude net-wireless/gr-iqbal" +LARGS="$LARGS --exclude net-wireless/gr-osmosdr" # [iqbalance] +LARGS="$LARGS --exclude pentoo/pentoo-radio" +LARGS="$LARGS --exclude gnuradio" + +# LARGS="$LARGS --exclude " + +# morons +#(dependency required by "dev-ruby/activesupport-4.2.11.1-r1::pentoo[ruby_targets_ruby26,-test]" [ebuild]) +#(dependency required by "dev-ruby/activemodel-4.2.11.1::pentoo[ruby_targets_ruby26]" [ebuild]) +#(dependency required by "dev-ruby/metasploit-model-2.0.4::pentoo[ruby_targets_ruby26]" [ebuild]) +#(dependency required by "net-analyzer/metasploit-4.17.21-r8::pentoo[ruby_targets_ruby25]" [ebuild]) +# +#(dependency required by "dev-ruby/metasploit-concern-2.0.5::pentoo[ruby_targets_ruby26]" [ebuild]) +#(dependency required by "dev-ruby/metasploit-credential-2.0.14::pentoo[ruby_targets_ruby25]" [ebuild]) +#(dependency required by "net-analyzer/metasploit-4.17.21-r8::pentoo[ruby_targets_ruby25]" [ebuild]) +LARGS="$LARGS --exclude net-analyzer/metasploit" + +# pentoo-rce +#?LARGS="$LARGS --exclude dev-util/redasm --exclude pentoo-rce" + +#(dependency required by "dev-ml/ocaml-gettext-0.3.7::gentoo" [ebuild]) +#(dependency required by "app-emulation/libguestfs-1.38.6::gentoo" [ebuild]) +LARGS="$LARGS --exclude dev-ml/ocaml-gettext --exclude app-emulation/libguestfs" + +# broken +#LARGS="$LARGS --exclude net-libs/libtorrent-rasterbar" +#LARGS="$LARGS --exclude net-p2p/qbittorrent" +# 9.0.1 even though it did work before +LARGS="$LARGS --exclude sys-devel/llvm" +# 6.1.6 even though app-emulation/virtualbox worked +# cc1: error: incompatible gcc/plugin versions +# cc1: error: fail to initialize plugin ./scripts/gcc-plugins/randomize_layout_plugin.so +LARGS="$LARGS --exclude app-emulation/virtualbox-modules" +# Unpacking NVIDIA-Linux-x86_64-440.82.run to /mnt/linuxBack52/mnt/tmp/Pentoo19/portage/x11-drivers/nvidia-drivers-440.82-r3/work +#tar: Archive is compressed. 
Use -J option +LARGS="$LARGS --exclude x11-drivers/nvidia-drivers" + +#(dependency required by "dev-qt/qtx11extras-5.14.2::gentoo" [ebuild]) +#(dependency required by "app-emulation/virtualbox-6.1.6::gentoo[-headless,qt5]" [installed]) +#(dependency required by "app-emulation/libvirt-6.1.0-r1::gentoo[virtualbox]" [ebuild]) +#(dependency required by "app-emulation/qt-virt-manager-9999::testforge" [ebuild]) +LARGS="$LARGS --exclude dev-qt/qtx11extras --exclude app-emulation/virtualbox --exclude app-emulation/libvirt --exclude app-emulation/qt-virt-manager" + +echo INFO: nice python3 emerge $LARGS $ARGS |tee -a world.log +nice python3 `which emerge` $LARGS $ARGS >> world.log 2>&1 +# grep ' U ' world.log |grep -v =|sed -e 's@^.* \] @@' -e 's@-[0-9].*@@' +# cat /var/lib/portage/world>world.elts;grep '^L.*exclude' world.sh|sed -e 's/.*exclude//' -e 's/"$//' -e 's/[*:].*$//' -e 's@/@\\\\/@' |sort -u |while read elt;do [ -z "$elt" ] && continue ; sed -e "/$elt/d" -i world.elts ;done + diff --git a/overlay/Gentoo/usr/local/sbin/gentoo_gemato.bash b/overlay/Gentoo/usr/local/sbin/gentoo_gemato.bash new file mode 100755 index 0000000..e7d9d51 --- /dev/null +++ b/overlay/Gentoo/usr/local/sbin/gentoo_gemato.bash @@ -0,0 +1,32 @@ +#!/bin/bash +# -*- mode: sh; fill-column: 75; tab-width: 8; coding: utf-8-unix -*- +# Gentoo only + +ROLE=base +PY_MINOR=3.8 + +if [ $# -gt 0 -a $1 = --help ] ; then + /usr/lib/python-exec/python${PY_MINOR}/gemato "$@" + exit $? +fi +. /usr/local/bin/proxy_export.bash >/dev/null + +declare -a G +if [ $1 = gpg-wrap ] ; then + G=(${*:1:1} -W ${*:2}) + # need -- + [[ $G =~ '--' ]] && G+=('--') + [ -f /etc/gnupg/gpgconf.conf ] && G+=(--options /etc/gnupg/gpgconf.conf) + else + G=(${*:1:1} ${*:2}) + fi + +# The --proxy code is for requests and is wrong: $http_proxy or $https_proxy +#? [ -z "$https_proxy" ] || G=(${G:1:1} --proxy $https_proxy ${G:2}) + +echo INFO: /usr/lib/python-exec/python${PY_MINOR}/gemato ${G[@]} >> /tmp/G$$.log 2>&1 +/usr/lib/python-exec/python${PY_MINOR}/gemato "${G[@]}" >> /tmp/G$$.log 2>&1 +ret=$? +DBUG "$ret" >> /tmp/G$$.log 2>&1 +cat /tmp/G$$.log +exit 0 diff --git a/overlay/Gentoo/usr/local/sbin/gentoo_hosts_json.bash b/overlay/Gentoo/usr/local/sbin/gentoo_hosts_json.bash new file mode 100755 index 0000000..8b4daa0 --- /dev/null +++ b/overlay/Gentoo/usr/local/sbin/gentoo_hosts_json.bash @@ -0,0 +1,25 @@ +#!/bin/sh +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- + +prog=$( basename $0 .bash ) +PREFIX=/var/local +ROLE=base +. /usr/local/etc/testforge/testforge.bash || exit 1 +. /usr/local/bin/usr_local_base.bash || exit 2 + +[ -n "$TESTFORGE_ANSIBLE_SRC" ] || TESTFORGE_ANSIBLE_SRC=/g/TestForge/src/ansible +if [ -d "$TESTFORGE_ANSIBLE_SRC" ] ; then + base=`cat /etc/hostname` + json="$TESTFORGE_ANSIBLE_SRC"/tmp/Hosts/$base.json + if [ ! -s $json ] || [ "$TESTFORGE_ANSIBLE_SRC"/hosts.yml -nt $json ] ; then + ansible-inventory --playbook-dir $TESTFORGE_ANSIBLE_SRC -i $TESTFORGE_ANSIBLE_SRC/hosts.yml \ + --host=$base > $json || exit 1$? 
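+    # ansible-inventory --host prints the host's variables as one JSON object
+    # (keys like BOX_OS_FLAVOR, BOX_PYTHON3_MINOR); it is cached under
+    # tmp/Hosts/<hostname>.json and picked apart with jq below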
+ fi + eval export BOX_OS_FLAVOR=`jq .BOX_OS_FLAVOR < $json` + DBUG BOX_OS_FLAVOR=$BOX_OS_FLAVOR +fi # 2>/dev/null +[ -z "$BOX_OS_FLAVOR" ] && ERROR BOX_OS_FLAVOR not set && exit 1 +[ "$BOX_OS_FLAVOR" = Ubuntu16 ] && ERROR BOX_OS_FLAVOR = Ubuntu16 && exit 2 +[ "$BOX_OS_FLAVOR" = Devuan4 ] && ERROR BOX_OS_FLAVOR = Devuan4 && exit 2 +echo $BOX_OS_FLAVOR +exit 0 diff --git a/overlay/Gentoo/usr/local/sbin/gentoo_hosts_update_pyver.bash b/overlay/Gentoo/usr/local/sbin/gentoo_hosts_update_pyver.bash new file mode 100755 index 0000000..1f88932 --- /dev/null +++ b/overlay/Gentoo/usr/local/sbin/gentoo_hosts_update_pyver.bash @@ -0,0 +1,27 @@ +#!/bin/sh +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- + +prog=$( basename $0 .bash ) +PREFIX=/var/local +ROLE=base +. /usr/local/etc/testforge/testforge.bash || exit 1 +. /usr/local/bin/usr_local_base.bash || exit 2 + +USAGE="$0 file" +#echo 1 +[ $# -ge 1 ] || { ERROR $prog "#=$#" ; exit 3 ; } + +BOX_OS_FLAVOR=`/usr/local/sbin/gentoo_hosts_json.bash` || exit 4$? + +# "BOX_PYTHON3_MINOR": "3.10" +PYTHON3_VER=`grep BOX_PYTHON3_MINOR <$json|sed -e 's/.*\.//' -e 's/,//' -e 's/"//'` + +# failsafe +# /mnt/o/data/TestForge/src/ansible/roles/base/overlay/Gentoo/usr/local/sbin/gentoo_etc_portage.bash +for file in $* ; do + [ -f $file ] || { WARN $file is not a file ; continue ; } + sed -i $file \ + -e "s/python_single_target_python3_[0-9]*/python_single_target_python3_$PYTHON3_VER/" \ + -e "s/python_targets_python3_[0-9]*/python_targets_python3_$PYTHON3_VER/" + done + diff --git a/overlay/Gentoo/usr/local/sbin/gentoo_link_portage_packages.bash b/overlay/Gentoo/usr/local/sbin/gentoo_link_portage_packages.bash new file mode 100755 index 0000000..dc56bd3 --- /dev/null +++ b/overlay/Gentoo/usr/local/sbin/gentoo_link_portage_packages.bash @@ -0,0 +1,43 @@ +#!/bin/bash +# -*- mode: sh; tab-width: 8; encoding: utf-8-unix -*- + +prog=$( basename $0 .bash ) +. /usr/local/bin/usr_local_tput.bash +ROLE=testforge + +shopt -s nullglob || exit 1 + +. /usr/local/bin/usr_local_tput.bash +[ -f /etc/conf.d/hostname ] && . /etc/conf.d/hostname + +if [ -n "$hostname" -a "$hostname" = "pentoo" ] ; then + TO=/mnt/o/Cache/linuxPen19/var/cache/portage/packages + elif [ -f /etc/dracut.conf.d/funtoo.conf ] ; then + TO=/mnt/o/Cache/linuxFun64/var/cache/portage/packages + elif [ -f /etc/dracut.conf.d/clipos.conf ] ; then + TO=/mnt/o/Cache/linuxClipos/var/cache/portage/packages + else + exit 3 +fi + +[ -n "$TO" ] || { echo ERROR: empty TO ; exit 4 ; } +[ -d $TO ] || { echo ERROR: mkdir -p $TO ; exit 5 ; } + +[ "$#" -eq 0 ] && set -- /usr/portage/packages + +for root in $* ; do + cd $root || exit 2$? 
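+    # drop dangling package symlinks first (with -L, find only reports a link
+    # as type l when its target is gone); the & lets it run while fresh links
+    # are added below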
+ INFO $root + find -L $root -type l -delete & + + pushd $TO + # -o -name \*.xpak + find *-* virtual pentoo -type f -name \*tbz2 | grep -v '_sources\|_download_' | while read file ; do + [ -h $root/$file ] && [ -f $file ] && continue + dir=`dirname $root/$file` + [ -d $dir ] || mkdir $dir + ln -s $TO/$file $root/$file + done + popd + done +exit 0 diff --git a/overlay/Gentoo/usr/local/sbin/gentoo_link_to_cache.bash b/overlay/Gentoo/usr/local/sbin/gentoo_link_to_cache.bash new file mode 100755 index 0000000..93ff8f5 --- /dev/null +++ b/overlay/Gentoo/usr/local/sbin/gentoo_link_to_cache.bash @@ -0,0 +1,45 @@ +#!/bin/bash +# -*-mode: sh; tab-width: 8; coding: utf-8-unix -*- + +ROLE=testforge +TXT=/usr/local/sbin/gentoo_link_to_cache.txt + +# replace /usr/net/Http/Makefile +[ "$#" -gt 0 ] && cd $1 || cd /usr/net/Http +[ -z "$DISTFILES" ] && DISTFILES=../../portage/distfiles + +find -L $DISTFILES/ -type l -delete + +find */ prdownloads.sourceforge.net/*/ github.com/*/ -type f -o -type l -name \*.\* | \ + grep -v ' \|\.[a-z]\|/archive/\|/tarball/' \ + | grep -v 'htm$\|html$\|txt$\|Makefile$\|\.py$\|~$' \ + | while read file ; do + base=`basename $file` + [ -h ${DISTFILES}/"$base" ] && continue + from=$(readlink ../../net/Http/"$file") + [ -z "$from" ] && from=../../net/Http/"$file" + ln -s $from ${DISTFILES}/"$base" +done + +# golang/net/archive/8d16fa6dc9a8.tar.gz +# burpsuite_community_v2020.9.1.jar portswigger.net/burp/releases/download@product=community&version=2020.9.1 + # burpsuite_community_v2020.4.jar mirror.leaseweb.com/gentoo/distfiles/0f/burpsuite_community_v2020.4.jar + # burpsuite_community_v2020.4.jar portswigger.net/burp/releases/download@product=community&version=2020.4 + # burpsuite_community_v2020.4.1.jar portswigger.net/burp/releases/download@product=community&version=2020.4.1 + +grep -v '^#' $TXT | while read base file ; do + if [ -z "$base" ] ; then + continue + fi + if [ -z "$file" ] ; then + echo ERROR: empty target for $base && continue + fi + + rm -f "${DISTFILES}/$base" + ln -s ../../net/Http/"$file" ${DISTFILES}/"$base" + [ -e /i/net/Http/distfiles.gentoo.org/distfiles/"$base" ] || \ + ln -s /i/net/Http/"$file" /i/net/Http/distfiles.gentoo.org/distfiles/"$base" + base="" + done +exit 0 +# RfCatDonsCCBootloader-1.9.2.hex atlas0fd00m/rfcat/releases/download/v1.9.2/RfCatDonsCCBootloader.hex diff --git a/overlay/Gentoo/usr/local/sbin/gentoo_lis_to_urls.bash b/overlay/Gentoo/usr/local/sbin/gentoo_lis_to_urls.bash new file mode 100755 index 0000000..c449c75 --- /dev/null +++ b/overlay/Gentoo/usr/local/sbin/gentoo_lis_to_urls.bash @@ -0,0 +1,18 @@ +#!/bin/sh +# -*-mode: sh; tab-width: 8; coding: utf-8-unix -*- +# filter + +CACHE=/mnt/i/net/Http +grep ^http:// | \ + sed -e 's@ftp://[^ ]*@@g' | \ + while read line ; do + for url in $line ; do + base=`basename "$url"` + [ -e /usr/portage/distfiles/$base ] && break + pre=`sed -e "s@http://@${CACHE}@" <<< $url` + [ -e $pre ] && break + echo $line + break + done + done +exit 0 diff --git a/overlay/Gentoo/usr/local/sbin/gentoo_qcheck_missing.bash b/overlay/Gentoo/usr/local/sbin/gentoo_qcheck_missing.bash new file mode 100755 index 0000000..fbe1b2a --- /dev/null +++ b/overlay/Gentoo/usr/local/sbin/gentoo_qcheck_missing.bash @@ -0,0 +1,9 @@ +#!/bin/sh +# -*- mode: sh; fill-column: 75; tab-width: 8; coding: utf-8-unix -*- +# https://forums.gentoo.org/viewtopic-t-1044220-start-0.html +qcheck -C \ + | grep -v -e '.*\.pyo$' -e '.*\.pyc$' \ + -e '.*/examples/.*' -e '^.*/doc/.*/[^/.]*$' \ + -e '^/usr/lib/debug/.*' -e '.*\.debug' \ + -e 
'^/usr/.*compiled' -e '^/usr/.*\.cache' \ + | grep -B 1 -e '^ MD5-DIGEST:' -e '^ AFK:' -e '^ MTIME:' diff --git a/overlay/Gentoo/usr/local/sbin/gentoo_rebuild_missing.bash b/overlay/Gentoo/usr/local/sbin/gentoo_rebuild_missing.bash new file mode 100755 index 0000000..cdac040 --- /dev/null +++ b/overlay/Gentoo/usr/local/sbin/gentoo_rebuild_missing.bash @@ -0,0 +1,15 @@ +#!/bin/sh +# -*- mode: sh; fill-column: 75; tab-width: 8; coding: utf-8-unix -*- + +ROLE=base + +# https://www.commandlinefu.com/commands/view/5988/re-emerge-all-ebuilds-with-missing-files-gentoo-linux +# Re-emerge all ebuilds with missing files (Gentoo Linux) + +qlist --installed --nocolor | uniq | while read cp; do + qlist --exact $cp | grep -v '/usr/share/bash-completion\|/doc/' | while read file; do + test -e "$file" || { echo $cp; echo "$cp: missing $file" 1>&2; break; }; + done; + done + +exit 0 diff --git a/overlay/Gentoo/usr/local/sbin/gentoo_wget_urls.bash b/overlay/Gentoo/usr/local/sbin/gentoo_wget_urls.bash new file mode 100755 index 0000000..f8dde8e --- /dev/null +++ b/overlay/Gentoo/usr/local/sbin/gentoo_wget_urls.bash @@ -0,0 +1,24 @@ +#!/bin/sh +# -*-mode: sh; tab-width: 8; coding: utf-8-unix -*- +# filter - arguments are to wget - quoted? + +ROOTDIR=/mnt/i +if [ "$#" -eq 0 ] ; then + WARGS="-xc -P $ROOTDIR/net/Http --tries=1" + else + WARGS="$@" + fi +grep ^http | \ + sed -e 's@^\(https://distfiles.gentoo.org/distfiles/[a-f0-9][a-f0-9]/[^ ]*\) \(https://[^ ]*\) @\2 \1@' | \ + sed -e 's@ftp://[^ ]*@@' -e 's@^https://distfiles.gentoo.org/distfiles/[^ ]* https://pypi.python.org/@https://pypi.python.org/@' -e 's/http:/https:/' \ + -e 's@https*://mirror.leaseweb.com/gentoo/@https://gentoo.osuosl.org@g' \ + -e 's@https*://distfiles.gentoo.org@https://gentoo.osuosl.org@g' | \ + while read urls ; do + url=`sed -e 's@ .*@@' <<< $urls` + base=`basename "$url"` + [ -e /usr/portage/distfiles/$base ] && echo distfiles/$base && continue + for url in $urls ; do + wget --restrict-file-names=windows --no-verbose $WARGS $url || continue + break + done + done diff --git a/overlay/Linux/etc/ssl/blacklist_yasat.lis b/overlay/Linux/etc/ssl/blacklist_yasat.lis new file mode 100644 index 0000000..b799cbc --- /dev/null +++ b/overlay/Linux/etc/ssl/blacklist_yasat.lis @@ -0,0 +1,6 @@ +# from yasat on Ubuntu16 +/etc/ssl/certs/Certplus_Class_2_Primary_CA.pem +/etc/ssl/certs/UTN_USERFirst_Hardware_Root_CA.pem +/etc/ssl/certs/DST_ACES_CA_X6.pem +/etc/ssl/certs/GeoTrust_Global_CA_2.pem +/etc/ssl/certs/Deutsche_Telekom_Root_CA_2.pem diff --git a/overlay/Linux/etc/sysctl.d/10_ptrace.conf b/overlay/Linux/etc/sysctl.d/10_ptrace.conf new file mode 100644 index 0000000..9626a65 --- /dev/null +++ b/overlay/Linux/etc/sysctl.d/10_ptrace.conf @@ -0,0 +1,4 @@ +# https://linux-audit.com/protect-ptrace-processes-kernel-yama-ptrace_scope/ +# kernel.yama.ptrace_scope = 0: all processes can be debugged, as long as they have same uid. This is the classical way of how ptracing worked. +sysctl kernel.yama.ptrace_scope = 0 + diff --git a/overlay/Linux/etc/udev/rules.d/70-persistent-net.rules b/overlay/Linux/etc/udev/rules.d/70-persistent-net.rules new file mode 100644 index 0000000..609f2e9 --- /dev/null +++ b/overlay/Linux/etc/udev/rules.d/70-persistent-net.rules @@ -0,0 +1,50 @@ +# This file was automatically generated by the /lib/udev/write_net_rules +# program, run by the persistent-net-generator.rules rules file. +# +# You can modify it, as long as you keep each rule on a single +# line, and change only the value of the NAME= key. 
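+# Added note (not generated by write_net_rules): each rule below pins a NIC to a
+# stable name by matching its MAC address.  To see what a device exposes for
+# matching, the stock udev tooling can be used, e.g.:
+#   udevadm info -a -p /sys/class/net/eth0 | grep -i address
+#   udevadm test /sys/class/net/eth0 2>&1 | grep NAME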
+ +# PCI device 0x168c:0x0036 (ath9k) +SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="70:18:8b:7f:c3:bf", ATTR{dev_id}=="0x0", ATTR{type}=="1", KERNEL=="wlan*", NAME="wlan0" + +# PCI device 0x10ec:0x8136 (r8169) +SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="74:86:7a:38:33:24", ATTR{dev_id}=="0x0", ATTR{type}=="1", KERNEL=="eth*", NAME="eth0" + +# USB device 0x148f:0x3070 (usb) +SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="48:02:2a:53:36:68", ATTR{dev_id}=="0x0", ATTR{type}=="1", KERNEL=="wlan*", NAME="wlan1" + +# USB device 0x148f:0x5370 (usb) +SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="00:87:30:33:5f:38", ATTR{dev_id}=="0x0", ATTR{type}=="1", KERNEL=="wlan*", NAME="wlan2" + +# USB device 0x148f:0x5370 (usb) +SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="00:0c:43:44:5a:e8", ATTR{dev_id}=="0x0", ATTR{type}=="1", KERNEL=="wlan*", NAME="wlan3" + +# USB device 0x0cf3:0x9271 (usb) +SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="00:c0:ca:84:ac:4b", ATTR{dev_id}=="0x0", ATTR{type}=="1", KERNEL=="wlan*", NAME="wlan4" + +# PCI device 0x168c:0x0036 (ath9k) +SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="70:18:8b:73:37:9f", ATTR{dev_id}=="0x0", ATTR{type}=="1", KERNEL=="wlan*", NAME="wlan5" + +# PCI device 0x8086:0x155a (e1000e) +SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="ec:f4:bb:67:40:1e", ATTR{dev_id}=="0x0", ATTR{type}=="1", KERNEL=="eth*", NAME="eth1" + +# PCI device 0x8086:0x08b1 (iwlwifi) +SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="80:19:34:af:89:b7", ATTR{dev_id}=="0x0", ATTR{type}=="1", KERNEL=="wlan*", NAME="wlan6" + +# PCI device 0x10ec:0x8168 (r8169) +SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="20:47:47:5f:35:2e", ATTR{dev_id}=="0x0", ATTR{type}=="1", KERNEL=="eth*", NAME="eth2" + +# PCI device 0x8086:0x095a (iwlwifi) +SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="4c:34:88:65:bc:f6", ATTR{dev_id}=="0x0", ATTR{type}=="1", KERNEL=="wlan*", NAME="wlan7" + +# PCI device 0x8086:0x15a2 (e1000e) +SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="34:e6:d7:6b:66:0d", ATTR{dev_id}=="0x0", ATTR{type}=="1", KERNEL=="eth*", NAME="eth3" + +# PCI device 0x8086:0x095a (iwlwifi) +SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="34:02:86:d3:9e:e2", ATTR{dev_id}=="0x0", ATTR{type}=="1", KERNEL=="wlan*", NAME="wlan8" + +# PCI device 0x8086:0x15a2 (e1000e) +SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="34:e6:d7:56:fa:c4", ATTR{dev_id}=="0x0", ATTR{type}=="1", KERNEL=="eth*", NAME="eth4" + +# PCI device 0x8086:0x095a (iwlwifi) +SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="34:02:86:19:a5:e6", ATTR{dev_id}=="0x0", ATTR{type}=="1", KERNEL=="wlan*", NAME="wlan9" diff --git a/overlay/Linux/usr/bootstrap_pip_ansible.bash b/overlay/Linux/usr/bootstrap_pip_ansible.bash new file mode 100755 index 0000000..6121d41 --- /dev/null +++ b/overlay/Linux/usr/bootstrap_pip_ansible.bash @@ -0,0 +1,435 @@ +#!/bin/bash -e +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- + +shopt -o -s pipefail + +[ $( id -u ) -eq 0 ] || { echo "ERROR: this must be run as root" ; exit 1 ; } + +. 
/usr/local/bin/usr_local_tput.bash || exit 2 +PREFIX=/usr/local +ROLE=base +WD=$PWD +PYVER=3 +PYTHON_MINOR=$( python$PYVER --version 2>&1| sed -e 's@^.* @@' -e 's@\.[0-9]*$@@' ) + +MV=mv +COPY="ln -s" + +[ -z "$BASE_PYTHON2_MINOR" ] && \ + BASE_PYTHON2_MINOR=$( python2 --version 2>&1| sed -e 's@^.* @@' -e 's@\.[0-9]*$@@' ) +[ -z "$BASE_PYTHON3_MINOR" ] && \ + BASE_PYTHON3_MINOR=$( python3 --version 2>&1| sed -e 's@^.* @@' -e 's@\.[0-9]*$@@' ) + +if [ -z "$LIB" -a -d /usr/lib/python$PYTHON_MINOR/site-packages ] ; then + LIB=lib +elif [ -z "$LIB" -a -d /usr/lib64/python$PYTHON_MINOR/site-packages ] ; then + LIB=lib64 +elif [ -n "$LIB" -a ! -d /usr/$LIB/python$PYTHON_MINOR/site-packages ] ; then + ERROR LIB=$LIB but no /usr/$LIB/python$PYTHON_MINOR/site-packages +fi + +[ -z "$BOX_ALSO_GROUP" ] || BOX_ALSO_GROUP=adm +[ -z "$UPTMP" ] && UPTMP=$PREFIX/tmp +# With packer the files we need are not on the host - they are pushed up and $UPTMP is populated with: +PDIRS="authorized_keys archives boxuser_pip_cache root_pip_cache cacert.pem wheels" +# With vagrant the files may have been tarred on the host and be in their cannonical positions. +# We symlink to files under vagrant to /tmp to leave the packer scripts untouched. +# With packer and docker we can remote mount partitions and not even copy them up to the guest. + +[ -n "$TESTF_DEBIAN10_VAR_APT_ARCHIVES" ] && [ -d "$TESTF_DEBIAN10_VAR_APT_ARCHIVES/" ] && \ + [ ! -e $UPTMP/archives ] && ln -s $TESTF_DEBIAN10_VAR_APT_ARCHIVES/ $UPTMP/archives +[ -n "$HOSTVMS_BOXUSER_PLAY_PIP_CACHE" ] && [ -e "$HOSTVMS_BOXUSER_PLAY_PIP_CACHE" ] && \ + [ ! -e $UPTMP/boxuser_pip_cache ] && ln -s $HOSTVMS_BOXUSER_PLAY_PIP_CACHE/ $UPTMP/boxuser_pip_cache +[ -n "$HOSTVMS_ROOT_PLAY_PIP_CACHE" ] && [ -d "$HOSTVMS_ROOT_PLAY_PIP_CACHE/" ] && \ + [ ! -e $UPTMP/root_pip_cache ] && ln -s "$HOSTVMS_ROOT_PLAY_PIP_CACHE/" $UPTMP/root_pip_cache + +export PLAY_PIP_CERT="/usr/local/etc/ssl/cacert-testforge.pem" +[ -f $PLAY_PIP_CERT ] && \ + [ ! -e $UPTMP/cacert.pem ] && ln -s $PLAY_PIP_CERT $UPTMP/cacert.pem + +# config_file = os.environ.get('PIP_CONFIG_FILE', None) +# /usr/$LIB/python2.7/site-packages/pip/_internal/configuration.py + +bootstrap_mkdir () { mkdir $1 ; chgrp $BOX_ALSO_GROUP $1 ; } + +[ -d /usr/local/tmp ] || { mkdir -p /usr/local/tmp ; chmod 1777 /usr/local/tmp ; } +site_packages=$PREFIX/$LIB/python$PYTHON_MINOR/site-packages +[ -d $site_packages ] || bootstrap_mkdir $site_packages +[ -f $site_packages/__init__.py ] || touch $site_packages/__init__.py +if [ ! -d /usr/local/tmp/wheels ] ; then + cd /usr/local + sh sbin/bootstrap_wheels.bash || exit 2 +fi +[ ! 
-d $UPTMP/wheels/ ] && [ $UPTMP/ != /usr/local/tmp/ ] && ln -s /usr/local/tmp/wheels $UPTMP/wheels + +# But with vagrant or docker we may have mounted the HOST partitions that contain the files +# [ -z "$TESTF_UBUNTU16_VAR_APT_ARCHIVES" ] && TESTF_UBUNTU16_VAR_APT_ARCHIVES -> $UPTMP/archives + +[ -z "BOX_USER_NAME" ] && BOX_USER_NAME=user +[ -z "BOX_USER_HOME" ] && BOX_USER_HOME=/home/$BOX_USER_NAME +[ -z "BOX_ALSO_GROUP" ] && BOX_ALSO_GROUP=adm +[ -z "$LOGDIR" ] && LOGDIR=$PREFIX/tmp + +[ -d $LOGDIR ] || { mkdir $LOGDIR ; chmod 1777 $LOGDIR ; } + +# not needed: --no-binary :all: --upgrade-strategy only-if-needed +# not yet: --user +PIP_INSTALL_ARGS="--disable-pip-version-check --prefix=$PREFIX --install-option=--prefix=$PREFIX" +scripts="ansible ansible-playbook ansible-pull ansible-doc ansible-galaxy ansible-console ansible-connection ansible-vault" + +export DEBIAN_FRONTEND=noninteractive +export PIP_DEFAULT_TIMEOUT=60 + +ANSIBLE_VER="2.8.12" +#2? PYYAML_VER="3.12" +ansible_tgz=ansible-$ANSIBLE_VER.tar.gz +#2? yaml_tgz=PyYAML-$PYYAML_VER.tar.gz + +if [ -n "$BOX_USER_NAME" ] ; then + # Packer will not have created this and we will need it early. + [ -d $BOX_USER_HOME ] || \ + bootstrap_mkdir $BOX_USER_HOME + #? useradd -d $BOX_USER_HOME -G root -m $BOX_USER_NAME + + # If you want to use your own private key for packer + [ -d $BOX_USER_HOME/.ssh ] || \ + bootstrap_mkdir $BOX_USER_HOME/.ssh + + if [ -f $UPTMP/authorized_keys ] ; then + $COPY $UPTMP/authorized_keys $BOX_USER_HOME/.ssh && \ + chmod 600 $BOX_USER_HOME/.ssh/authorized_keys + fi + chmod 700 $BOX_USER_HOME/.ssh/ +fi + +[ -d /var/cache/apt/archives ] || mkdir -p /var/cache/apt/archives +# If you upload your cache of Ubuntu .debs, it cuts down on the downloading +[ -d $UPTMP/archives ] && \ + $COPY $UPTMP/archives/*.deb /var/cache/apt/archives 2>/dev/null +# leave this for cleanup: +# rm -rf $UPTMP/archives + +# If you upload your cache of pip files, it cuts down on the downloading +if [ -d $UPTMP/boxuser_pip_cache ] ; then + bootstrap_mkdir $BOX_USER_HOME/.cache/ && \ + cp -rip $UPTMP/boxuser_pip_cache $BOX_USER_HOME/.cache/pip && \ + chown -R ${BOX_USER_NAME}.{BOX_ALSO_GROUP} $BOX_USER_HOME/.cache/pip && \ + chmod -R g+rw $BOX_USER_HOME/.cache/pip && \ + chmod -R o-w $BOX_USER_HOME/.cache/pip + fi +if [ -d $UPTMP/root_pip_cache ] ; then + bootstrap_mkdir /root/.cache/ && \ + cp -rip $UPTMP/root_pip_cache /root/.cache/pip && \ + chown -R root.root /root/.cache/pip && \ + chmod -R g+rw /root/.cache/pip && \ + chmod -R o-w /root/.cache/pip +fi + +if [ -d /etc/apt ] ; then + if ! route | grep -q ^default ; then + DEBUG "Not connected; skipping apt-get update" + elif [ ! -f /var/log/dpkg.log ] ; then + apt-get update # || exit 4 + fi + which unzip || ! [ -f /var/cache/apt/archives/unzip_6.0-23+deb10u1_amd64.deb ] || \ + dpkg -i /var/cache/apt/archives/unzip_6.0-23+deb10u1_amd64.deb + which curl || [ ! -f /var/cache/apt/archives/curl_7.64.0-4+deb10u1_amd64.deb ] || \ + dpkg -i /var/cache/apt/archives/curl_7.64.0-4+deb10u1_amd64.deb \ + /var/cache/apt/archives/libcurl4_7.64.0-4+deb10u1_amd64.deb \ + /var/cache/apt/archives/libcurl4-openssl-dev_7.64.0-4+deb10u1_amd64.deb + apt-get install -y --force-yes wget unzip openssl || true + [ -f /usr/include/Python.h ] || \ + apt-get install -y --force-yes \ + libffi-dev libssl-dev python3-dev python3-apt python3-pycparser \ + python3-coverage || \ + echo WARN you must run apt-get update + # msg: Could not find `coverage` module. + +elif [ -d /etc/portage ] ; then + # FixMe: put these in wheels? 
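+        # Added note on the emerges below: -b/--buildpkg also drops a binary package
+        # into $PKGDIR, so later runs (and other boxes sharing that cache) can reuse
+        # it with --usepkg instead of recompiling.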
+ [ -x /usr/bin/unzip ] || which unzip 2>/dev/null || emerge -vb app-arch/unzip + [ -x /usr/bin/wget ] || which wget 2>/dev/null || emerge -vb net-misc/wget + which openssl 2>/dev/null || timeout 600 emerge -vb dev-libs/openssl + # openssl installs: + # dev-python/pyopenssl-19.1.0 + # dev-python/six-1.13.0 + # dev-python/cryptography-2.8 + # dev-python/cffi-1.12.3:0/1.12.3 + # dev-python/pycparser-2.19-r1 + # dev-python/ply-3.11:0/3.11 + # virtual/python-ipaddress-1.0-r1 + # dev-python/ipaddress-1.0.23 + # virtual/python-enum34-2 + # dev-python/enum34-1.1.6-r1 + python$PYVER -c 'import OpenSSL' 2>/dev/null || timeout 600 emerge -vb dev-python/pyopenssl + python$PYVER -c 'import pycparser' 2>/dev/null || timeout 600 emerge -vb dev-python/pycparser + python$PYVER -c 'import yaml' 2>/dev/null || timeout 600 emerge -vb dev-python/pyyaml + DEBUG "Gentoo Installed openssl and wget" + fi + +# On a CORP laptop off the VPN we may need some CAs +[ -d $PREFIX/etc/ssl ] || mkdir -p $PREFIX/etc/ssl +[ ! -f $PLAY_PIP_CERT ] && \ + [ -f $UPTMP/cacert.pem ] && \ + $COPY $UPTMP/cacert.pem $PLAY_PIP_CERT + +# pip gets confused +# or just delete $PREFIX/$LIB/python$PYTHON_MINOR/dist-packages afterwards + +for PYVER in 3 ; do + PYTHON_MINOR=$( python$PYVER --version 2>&1| sed -e 's@^.* @@' -e 's@\.[0-9]*$@@' ) + + site_packages=$PREFIX/$LIB/python$PYTHON_MINOR/site-packages + [ -d $site_packages ] || bootstrap_mkdir $site_packages + [ -f $site_packages/__init__.py ] || touch $site_packages/__init__.py + if [ -d /etc/apt ] ; then + dist_packages=$PREFIX/lib/python$PYTHON_MINOR/dist-packages + WD=$PWD + if [ -d $dist_packages ] ; then + cd $PREFIX/lib/python$PYTHON_MINOR + ln -s $site_packages . + cd $WD + fi + fi + + # we will use $PREFIX/bin/python3.bash NOT $PREFIX/bin/python3.sh + # to not conflict with what Ansible will push later/before. + if [ ! -e $PREFIX/bin/python$PYVER.bash ] ; then + echo "INFO: bootstraping $PREFIX/bin/python$PYVER.bash" + cat > $PREFIX/bin/python$PYVER.bash << EOF +#!/bin/sh +# -*-mode: sh; tab-width: 8; coding: utf-8-unix -*- +# from bootstrap_pip_ansible.bash +. /usr/local/bin/usr_local_tput.bash || exit 2 +PREFIX=/usr/local + +# pip gets confused +dist_packages=$site_packages +dist_packages=\$dist_packages:\${dist_packages}/pip/_vendor +if [ -z "$PYTHONPATH" ] ; then + export PYTHONPATH=\$dist_packages +else + export PYTHONPATH=\$PYTHONPATH:\$dist_packages +fi + +exec python$PYTHON_MINOR "\$@" +EOF + chmod 755 $PREFIX/bin/python$PYVER.bash + + fi + + # pip may be loaded in the base iso + if [ -x $PREFIX/bin/python$PYVER.bash ] && \ + $PREFIX/bin/python$PYVER.bash -c 'import pip' 2>/dev/null ; then + INFO pip$VER already installed + elif [ ! 
-d $UPTMP/wheels/ ] ; then + WARN $UPTMP/wheels not found + else + # we may be without the VPN/proxy but on a corporate laptop + # with a hosed chain of Certificate Authorities for the MITM proxy + # in which case http://bootstrap.pypa.io/get-pip.py will not work, + # so effective but groddy: + # just unzip the wheels into site-packages and force-reinstall later + cd $UPTMP/wheels/ + + echo "INFO: installing pip - unzipping wheels into $site_packages" + for file in *.whl ; do + #a=$( echo $file | sed -e 's/-.*//' ) + #b=$( basename $a|sed -e 's/Py//'|tr '[A-Z]' '[a-z]' ) + #python$PYVER -c "import $b" 2>/dev/null >/dev/null && continue + unzip -n $file -d $site_packages >/dev/null + done + + # morons + # -rwx------ 1 root root 8866 Jun 11 2018 /usr/local/$LIB/python$PYTHON_MINOR/site-packages/idna-2.7.dist-info/METADATA + find $site_packages -type d -exec chmod a+rx '{}' \; + find $site_packages -type f -exec chmod a+r '{}' \; + chgrp -R "$BOX_ALSO_GROUP" $site_packages + + # hack in a PYTHONPATH for our unzipped wheels - removed later + for elt in pip ; do # is wheel needed? + echo "INFO: Installing $elt" + # use $PYVER.bash for bootstrap - $PYVER.bash will come later + cat > $PREFIX/bin/$elt$PYVER.bash << EOF +#!/bin/sh +# -*-mode: sh; tab-width: 8; coding: utf-8-unix -*- +export PLAY_PIP_CERT=$PIP_CERT +export PYTHONPATH=${site_packages} +export PYTHONPATH=\$PYTHONPATH:${site_packages}/pip/_vendor +#? FixMe: narrow to InsecurePlatformWarning +python$PYVER -W ignore -m $elt "\$@" +EOF + chmod 755 $PREFIX/bin/$elt$PYVER.bash + $PREFIX/bin/$elt$PYVER.bash --help >/dev/null + DEBUG "Installed $elt$PYVER.bash" + done + fi + + # do I still need this + #if [ -x $PREFIX/bin/pip$PYVER ] && [ -d $site_packages ] ; then + # export PYTHONPATH=$site_packages:$site_packages/pip/_vendor + #fi + + if [ ! -x $PREFIX/bin/pip$PYVER.bash ] ; then + echo "ERROR: Failed to Install pip$PYVER at $PREFIX/bin/pip$PYVER.bash" + exit 3 + elif ! $PREFIX/bin/python$PYVER.bash -m pip -V ; then + echo "ERROR: Failed to run pip$PYVER at $PREFIX/bin/pip$PYVER" + exit 4 + fi + + if [ -f $PLAY_PIP_CERT ] ; then + if [ ! -f $site_packages/pip/_vendor/requests/cacert.pem.dst ] && \ + [ -f $site_packages/pip/_vendor/requests/cacert.pem ] && \ + [ ! -h $site_packages/pip/_vendor/requests/cacert.pem ] ; then + mv $site_packages/pip/_vendor/requests/cacert.pem $site_packages/pip/_vendor/requests/cacert.pem.dst + fi + if [ ! -h $site_packages/pip/_vendor/requests/cacert.pem ] ; then + rm -f $site_packages/pip/_vendor/requests/cacert.pem + fi + [ -e $site_packages/pip/_vendor/requests/cacert.pem ] || \ + ln -s $PLAY_PIP_CERT $site_packages/pip/_vendor/requests/cacert.pem + INFO linked $PLAY_PIP_CERT $site_packages/pip/_vendor/requests/cacert.pem + fi + done + +# dont use -CAfile $UPTMP/cacert.pem - we want it to fail if we need the cert +if openssl s_client -connect pypi.org:443 > install.log + retval=$? + cd $WD + return $retval +} +# NOW we use our fresh pip to install ansible from source, into /usr/local +if [ -x $PREFIX/bin/ansible ] ; then + INFO already installed $PREFIX/bin/ansible + else + if true ; then + DEBUG "$PREFIX/bin/pip$PYVER.bash install $PIP_INSTALL_ARGS $UPTMP/wheels/$ansible_tgz" + # install from the file to keep the version pinned + $PREFIX/bin/pip$PYVER.bash install $PIP_INSTALL_ARGS $UPTMP/wheels/$ansible_tgz \ + >> $LOGDIR/pip_install_pip_ansible.log 2>&1 || \ + { ERROR installing $ansible_tgz ; cat $LOGDIR/pip_install_pip_ansible.log && exit 7 ; } + else + boostrap_setup_ansible + [ $? 
-eq 0 ] || { ERROR installing ansible ; tail install.log ; exit 8 ; } + fi + if [ -d /etc/portage/ ] ; then + [ -d /etc/portage/profile ] || mkdir /etc/portage/profile + grep -q app-admin/ansible-$ANSIBLE_VER /etc/portage/profile/package.provided || \ + echo app-admin/ansible-$ANSIBLE_VER >> /etc/portage/profile/package.provided + fi + + cd $PREFIX/bin + [ -e ansible-doc ] || { ERROR installing ansible-doc ; exit 9 ; } + grep "#\!.$PREFIX/bin/python$PYVER.bash" ansible-doc || \ + sed -e "s@^#\!.*python.*@#\!${PREFIX}/bin/python$PYVER.bash@" -i $scripts +fi + +ansible --version || exit 10 + +if [ -f $PLAY_PIP_CERT ] ; then + export PLAY_PIP_CERT=$PIP_CERT + PIP_INSTALL_ARGS="$PIP_INSTALL_ARGS --cert $PLAY_PIP_CERT" + else + echo "WARN: PLAY_PIP_CERT not found $PIP_CERT" + fi + +if [ ! -f /etc/wgetrc ] ; then + sh $WD/bootstrap_proxy.bash +fi + +# pip uses curl - and has a config file PIP_CONFIG +DEBUG "http_proxy=$http_proxy https_proxy=$https_proxy" +if [ -n "$https_proxy" ] ; then + echo "INFO: Adding to PIP_INSTALL_ARGS --proxy=$https_proxy" + elif [ -f /etc/wgetrc ] && grep ^http_proxy /etc/wgetrc ; then + proxy=$( grep ^http_proxy /etc/wgetrc|sed -e 's@.*=@--proxy=@' ) + echo "INFO: Adding to PIP_INSTALL_ARGS $proxy" + PIP_INSTALL_ARGS="$PIP_INSTALL_ARGS $proxy" +fi + +cd $PREFIX/src +# install pycurl as a test of pip and a requisite for proxyauth.py +if ! $PREFIX/bin/python$PYVER.bash -c 'import curl' 2>/dev/null ; then + if [ -d /etc/apt ] ; then + apt-get install -y --force-yes libcurl4-openssl-dev \ + 2>&1|tee $LOGDIR/apt-get_install_libcurl4-openssl-dev.log + elif [ -d /etc/portage ] ; then + [ -x /usr/bin/curl ] || which curl 2>/dev/null || emerge -vb curl + fi + #? --allow-unverified pycurl + if ! route | grep -q ^default ; then + DEBUG "Not connected; not installing pycurl" + elif $PREFIX/bin/pip$PYVER.bash install $PIP_INSTALL_ARGS pycurl >> $LOGDIR/pip_install_pycurl.log 2>&1 ; then + echo "INFO: Installed pycurl from pip with $PREFIX/bin/pip install $PIP_INSTALL_ARGS" + # We dont fail the packer build if it errors - just fix it and rerun + $PREFIX/bin/python$PYVER.bash -c 'import curl; print curl.__file__' || true + else + echo "WARN: Installing pycurl failed with $PREFIX/bin/pip install $PIP_INSTALL_ARGS" + cat $LOGDIR/pip_install_pycurl.log + fi + fi + +[ -e /usr/local/bin/python$PYVER.sh ] || \ + [ -h /usr/local/bin/python$PYVER.sh ] || \ + ln -s /usr/local/bin/python$PYVER.babash /usr/local/bin/python$PYVER.sh + +find /usr/local/$LIB/python$PYVER.7/site-packages/ansible/modules/ -name \*.py \ + -exec grep -q /usr/bin/python '{}' \; -print \ + -exec sed -e "1,$PYVERs@#!/usr/bin/python@#!/usr/local/bin/python$PYVER.bash@" -i '{}' \; + +exit 0 diff --git a/overlay/Linux/usr/local/bin/base_certdata2pem.bash b/overlay/Linux/usr/local/bin/base_certdata2pem.bash new file mode 100755 index 0000000..94cbe0d --- /dev/null +++ b/overlay/Linux/usr/local/bin/base_certdata2pem.bash @@ -0,0 +1,7 @@ +#!/bin/sh +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- + +ROLE=base +prog=$( basename $0 .bash ) + +exec python3.sh /usr/local/bin/base_certdata2pem.py "$@" diff --git a/overlay/Linux/usr/local/bin/base_certdata2pem.py b/overlay/Linux/usr/local/bin/base_certdata2pem.py new file mode 100644 index 0000000..0b02b2a --- /dev/null +++ b/overlay/Linux/usr/local/bin/base_certdata2pem.py @@ -0,0 +1,153 @@ +#!/usr/bin/python +# vim:set et sw=4: +# +# certdata2pem.py - splits certdata.txt into multiple files +# +# Copyright (C) 2009 Philipp Kern +# +# This program is free software; you can 
redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, +# USA. + +import base64 +import os.path +import re +import sys +import textwrap +import io + +objects = [] + +# Dirty file parser. +in_data, in_multiline, in_obj = False, False, False +field, type, value, obj = None, None, None, dict() + +# Python 3 will not let us decode non-ascii characters if we +# have not specified an encoding, but Python 2's open does not +# have an option to set the encoding. Python 3's open is io.open +# and io.open has been backported to Python 2.6 and 2.7, so use io.open. +for line in io.open('certdata.txt', 'rt', encoding='utf8'): + # Ignore the file header. + if not in_data: + if line.startswith('BEGINDATA'): + in_data = True + continue + # Ignore comment lines. + if line.startswith('#'): + continue + # Empty lines are significant if we are inside an object. + if in_obj and len(line.strip()) == 0: + objects.append(obj) + obj = dict() + in_obj = False + continue + if len(line.strip()) == 0: + continue + if in_multiline: + if not line.startswith('END'): + if type == 'MULTILINE_OCTAL': + line = line.strip() + for i in re.finditer(r'\\([0-3][0-7][0-7])', line): + value.append(int(i.group(1), 8)) + else: + value += line + continue + obj[field] = value + in_multiline = False + continue + if line.startswith('CKA_CLASS'): + in_obj = True + line_parts = line.strip().split(' ', 2) + if len(line_parts) > 2: + field, type = line_parts[0:2] + value = ' '.join(line_parts[2:]) + elif len(line_parts) == 2: + field, type = line_parts + value = None + else: + raise NotImplementedError('line_parts < 2 not supported.') + if type == 'MULTILINE_OCTAL': + in_multiline = True + value = bytearray() + continue + obj[field] = value +if len(obj) > 0: + objects.append(obj) + +# Read blacklist. +blacklist = [] +if os.path.exists('blacklist.txt'): + for line in open('blacklist.txt', 'r'): + line = line.strip() + if line.startswith('#') or len(line) == 0: + continue + item = line.split('#', 1)[0].strip() + blacklist.append(item) + +# Build up trust database. +trust = dict() +for obj in objects: + if obj['CKA_CLASS'] != 'CKO_NSS_TRUST': + continue + if obj['CKA_LABEL'] in blacklist: + print("Certificate %s blacklisted, ignoring." % obj['CKA_LABEL']) + elif obj['CKA_TRUST_SERVER_AUTH'] == 'CKT_NSS_TRUSTED_DELEGATOR': + trust[obj['CKA_LABEL']] = True + elif obj['CKA_TRUST_SERVER_AUTH'] == 'CKT_NSS_NOT_TRUSTED': + print('!'*74) + print("UNTRUSTED BUT NOT BLACKLISTED CERTIFICATE FOUND: %s" % obj['CKA_LABEL']) + print('!'*74) + else: + print("Ignoring certificate %s. 
SAUTH=%s, EPROT=%s" % \ + (obj['CKA_LABEL'], obj['CKA_TRUST_SERVER_AUTH'], + obj['CKA_TRUST_EMAIL_PROTECTION'])) + +for obj in objects: + if obj['CKA_CLASS'] == 'CKO_CERTIFICATE': + if not obj['CKA_LABEL'] in trust or not trust[obj['CKA_LABEL']]: + continue + bname = obj['CKA_LABEL'][1:-1].replace('/', '_')\ + .replace(' ', '_')\ + .replace('(', '=')\ + .replace(')', '=')\ + .replace(',', '_') + + # this is the only way to decode the way NSS stores multi-byte UTF-8 + # and we need an escaped string for checking existence of things + # otherwise we're dependant on the user's current locale. + if bytes != str: + # We're in python 3, convert the utf-8 string to a + # sequence of bytes that represents this utf-8 string + # then encode the byte-sequence as an escaped string that + # can be passed to open() and os.path.exists() + bname = bname.encode('utf-8').decode('unicode_escape').encode('latin-1') + else: + # Python 2 + # Convert the unicode string back to its original byte form + # (contents of files returned by io.open are returned as + # unicode strings) + # then to an escaped string that can be passed to open() + # and os.path.exists() + bname = bname.encode('utf-8').decode('string_escape') + + fname = bname + b'.crt' + if os.path.exists(fname): + print("Found duplicate certificate name %s, renaming." % bname) + fname = bname + b'_2.crt' + f = open(fname, 'w') + f.write("-----BEGIN CERTIFICATE-----\n") + encoded = base64.b64encode(obj['CKA_VALUE']).decode('utf-8') + f.write("\n".join(textwrap.wrap(encoded, 64))) + f.write("\n-----END CERTIFICATE-----\n") + diff --git a/overlay/Linux/usr/local/bin/base_check_site_py.bash b/overlay/Linux/usr/local/bin/base_check_site_py.bash new file mode 100755 index 0000000..40ff88a --- /dev/null +++ b/overlay/Linux/usr/local/bin/base_check_site_py.bash @@ -0,0 +1,90 @@ +#!/bin/bash +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- + +prog=$( basename $0 .bash ) +PREFIX=/usr/local +ROLE=base +[ "$#" -eq 0 ] && echo USAGE: $0 2.7 ... 3.9 3.10 3.11 && exit 1 + +. /usr/local/bin/usr_local_base.bash || exit 2 +[ -f $PREFIX/etc/testforge/testforge.bash ] \ + && . /usr/local/etc/testforge/testforge.bash + +PYTHON_MINOR="$1" +PYMAJ="${PYTHON_MINOR:0:1}" + +if [ -z "$LIB" -a -f /usr/lib/python$PYTHON_MINOR/site.py ] ; then + LIB=lib +elif [ -z "$LIB" -a -f /usr/lib64/python$PYTHON_MINOR/site.py ] ; then + LIB=lib64 +fi +[ -d /usr/local/lib/python$PYTHON_MINOR ] && \ + [ ! 
-e /usr/local/lib64/python$PYTHON_MINOR ] && \ + ln -s /usr/local/lib/python$PYTHON_MINOR /usr/local/lib64/python$PYTHON_MINOR + +if [ "" = "$BASE_PYTHON2_MINOR" ] ; then + not_PYTHON_MINOR="" +elif [ $PYTHON_MINOR = "$BASE_PYTHON2_MINOR" ] ; then + not_PYTHON_MINOR="$BASE_PYTHON3_MINOR" +elif [ $PYTHON_MINOR = "$BASE_PYTHON3_MINOR" ] ; then + not_PYTHON_MINOR="$BASE_PYTHON2_MINOR" +else + ERROR "$PYTHON_MINOR not in $BASE_PYTHON2_MINOR $BASE_PYTHON3_MINOR" + exit 1 +fi +INFO $prog PYMAJ=$PYMAJ PYTHON_MINOR=$PYTHON_MINOR not_PYTHON_MINOR=$not_PYTHON_MINOR PYTHONPATH=$PYTHONPATH +export PYTHONPATH="" + +if [ "$PYMAJ" = '2' ] ; then + imp='import sys; print sys.path' + elif [ "$PYMAJ" = '3' ] ; then + imp='import sys; print(repr(sys.path))' + fi + +[ -x $PREFIX/bin/python$PYMAJ.sh ] || { + echo >&2 ERROR: $prog 2 -x $PREFIX/bin/python$PYMAJ.sh "$PYTHON_MINOR" && exit 2 ; +} +if [ -f /etc/python-exec/python2.conf ] ; then + grep -F "$BASE_PYTHON2_MINOR" /etc/python-exec/python2.conf || { + echo >&2 ERROR: $prog 3 "$BASE_PYTHON2_MINOR" /etc/python-exec/python2.conf + } +fi +if [ -f /etc/python-exec/python3.conf ] ; then + grep -F "$BASE_PYTHON3_MINOR" /etc/python-exec/python3.conf || { + echo >&2 ERROR: $prog 4 "$BASE_PYTHON3_MINOR" /etc/python-exec/python3.conf + } +fi + +# echo -n DEBUG: $prog 2 python$PYTHON_MINOR -S -s +python$PYMAJ -S -s -c "$imp" \ + || { echo >&2 ERROR: $prog 22 $PYTHON_MAJ -S -s"$PYTHON_MINOR" && exit 22 ; } + +# echo -n DEBUG: $prog 4 python$PYTHON_MINOR -s +python$PYMAJ -s -c "$imp" \ + || { echo >&2 ERROR: $prog 4 python$PYTHON_MINOR -s "$PYTHON_MINOR" && exit 4 ; } + +# echo -n DEBUG: $0 6 $PREFIX/bin/python$PYMAJ.sh -S -s +$PREFIX/bin/python$PYMAJ.sh -S -s -c "$imp" \ + || { echo >&2 ERROR: $prog 6 python$PYMAJ.sh -S -s "$PYTHON_MINOR" && exit 6 ; } + + +echo -n DEBUG: $0 8 $PREFIX/bin/python$PYMAJ.sh -s +$PREFIX/bin/python$PYMAJ.sh -s -c "$imp" \ + || { echo >&2 ERROR: $prog 8 python$PYMAJ.sh -s "$PYTHON_MINOR" && exit 8 ; } + +# INFO $prog 10 $PREFIX/bin/python$PYMAJ.sh sitecustomize.py "$PYTHON_MINOR" +a=$( $PREFIX/bin/python$PYMAJ.sh $PREFIX/$LIB/python$PYTHON_MINOR/site-packages/sitecustomize.py ) || \ + { echo >&2 ERROR: $prog "error 10 $PREFIX/bin/python$PYMAJ.sh $PREFIX/$LIB/python$PYTHON_MINOR/site-packages/sitecustomize.py" && exit 10 ; } +#[ -x "$a" ] || \ +# { echo >&2 ERROR: $prog 11 "broken $PREFIX/bin/python$PYMAJ.sh /usr/local/bin/python2.sh - $a" && exit 11 ; } +#echo >&2 INFO: $prog 11 "$a" + +# INFO $prog 12 python$PYTHON_MINOR sitecustomize.py "$PYTHON_MINOR" +python$PYMAJ $PREFIX/$LIB/python$PYTHON_MINOR/site-packages/sitecustomize.py || \ + { ERROR 12 $prog python$PYMAJ sitecustomize.py "$PYTHON_MINOR" && exit 12 ; } + +exit 0 +# [ $( python2.sh {{BASE_USR_LOCAL}}/$LIB/python{{BASE_PYTHON2_MINOR}}/site-packages/sitecustomize.py ) = {{BASE_USR_LOCAL}}/bin/python2.sh ] || exit 2 +# [ $( python3.sh {{BASE_USR_LOCAL}}/$LIB/python{{BASE_PYTHON3_MINOR}}/site-packages/sitecustomize.py ) = {{BASE_USR_LOCAL}}/bin/python3.sh ] || exit 3 +# [ $( python2.bash {{BASE_USR_LOCAL}}/$LIB/python{{BASE_PYTHON2_MINOR}}/site-packages/sitecustomize.py ) = /var/local/bin/python2.bash ] || exit 22 +# [ $( python3.bash {{BASE_USR_LOCAL}}/$LIB/python{{BASE_PYTHON3_MINOR}}/site-packages/sitecustomize.py ) = /var/local/bin/python3.bash ] || exit 33 diff --git a/overlay/Linux/usr/local/bin/base_clean_filenames.bash b/overlay/Linux/usr/local/bin/base_clean_filenames.bash new file mode 100755 index 0000000..ed96807 --- /dev/null +++ 
b/overlay/Linux/usr/local/bin/base_clean_filenames.bash @@ -0,0 +1,27 @@ +#!/bin/sh +# -*- mode: sh; fill-column: 75; tab-width: 8; coding: utf-8-unix -*- + +ROLE=base +PREFIX=/usr/local +prog=$( basename $0 .bash ) + +. /usr/local/bin/usr_local_tput.bash + +# accepted files or directories -- to recusively look for files in +[ "$#" -eq 0 ] && set -- $PWD/ + +# Clean the bad ones under Windows: [:] and other uglies ['"{}[]?!] +# The Bad ones break rsync and but the others can cause trouble elsewhere +re='[^ .,~%+=^@!0-9a-zA-z_()#-]' + +find "$@" -type f -or -type d | while read file ; do + dir=`dirname "$file"` + base=`basename "$file"` + # wierd = misses "ZeeRex The Explainable ``Explain__ Service.htm" + new=`sed -f $PREFIX/share/sed/base_clean_filenames.sed <<< $base` + [ "$base" = "$new" ] && continue + [ -f "$file" -a -f "$dir/$new" ] && diff -qr "$file" "$dir/$new" && rm -f "$file" && continue + DBUG \"$file\" \"$dir/$new\" + mv -i "$file" "$dir/$new" + done +exit 0 diff --git a/overlay/Linux/usr/local/bin/base_clean_path.bash b/overlay/Linux/usr/local/bin/base_clean_path.bash new file mode 100755 index 0000000..bf7e2ab --- /dev/null +++ b/overlay/Linux/usr/local/bin/base_clean_path.bash @@ -0,0 +1,24 @@ +#!/bin/bash +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- +# we use stdout + +ROLE=base +prog=$( basename $0 .bash ) + +N="" +IFS=':' +[ -z "$UID" ] && UID=$( id -u ) +for elt in $PATH ; do + [ $UID -eq 0 -a "$elt" = '.' ] && continue + [ -d "$elt" ] || continue + [ -z "$N" ] && N="$elt" && continue + [[ $N =~ (^|:)${elt}(:|$) ]] && continue + N="$N:$elt" && continue + done +IFS=' ' + +elt=/var/local/bin +[[ "$N" =~ (^|:)"${elt}"(:|$) ]] || N="$N:$elt" + +echo $N +exit 0 diff --git a/overlay/Linux/usr/local/bin/base_clean_pythonpath.bash b/overlay/Linux/usr/local/bin/base_clean_pythonpath.bash new file mode 100755 index 0000000..2de0ca9 --- /dev/null +++ b/overlay/Linux/usr/local/bin/base_clean_pythonpath.bash @@ -0,0 +1,40 @@ +#!/bin/bash +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- + +# answer output +prog=$( basename $0 .bash ) +ROLE=base + +[ $# -lt 2 ] && echo "USAGE: $0 PYTHON_MINOR PPATH" >>/proc/self/fd/2 && exit 1 +. /usr/local/bin/usr_local_tput.bash || exit 2 +PREFIX=/usr/local +PYTHON_MINOR=$1 +PPATH=$2 + +PYVER=$( echo $1|sed -e 's/.*python//' -e 's@/.*@@' ) + +[[ "$PYTHON_MINOR" =~ .*2\..* ]] && notPYVER="3." || notPYVER="2." +# echo "DEBUG: $1 $PPATH $notPYVER" >>/proc/self/fd/2 + +N="" +IFS=':' +warns=0 +[ -z "$UID" ] && UID=$( id -u ) +for elt in $PPATH ; do + [ -d "$elt" ] || continue + + [[ $elt =~ .*python${notPYVER}.* ]] ; a=$? + # DBUG $1 $elt $notPYVER a=$a >>/proc/self/fd/2 + [ $a -eq 0 ] && { WARN $prog wanted: $PYTHON_MINOR got: $elt >>/proc/self/fd/2 ; \ + warns=$( expr $warns + 1 ) ; continue ; } + + [ -z "$N" ] && N="$elt" && continue + [[ $N =~ $elt ]] && continue + [ -n "$N" ] && N="$N:$elt" + # DBUG $prog adding: $elt + done +IFS=' ' + +echo $N + +exit $warns diff --git a/overlay/Linux/usr/local/bin/base_daily.bash b/overlay/Linux/usr/local/bin/base_daily.bash new file mode 100755 index 0000000..c619ebc --- /dev/null +++ b/overlay/Linux/usr/local/bin/base_daily.bash @@ -0,0 +1,95 @@ +#!/bin/bash +# -*- mode: sh; fill-column: 75; tab-width: 8; coding: utf-8-unix -*- + +prog=$( basename $0 .bash ) +. /usr/local/bin/usr_local_tput.bash || exit 2 +PREFIX=/usr/local +ROLE=base + +# The idea here is to run ansible_local.bash --tags daily +# and then use this to do the parsing and throwing errors based on the output. 
+# This was the ansible run can be free from erroring and this can be +# run repeatedly anytime outside of ansible to deal with the issues raised. +# It is also run at the end of ansible_local.bash --tags daily to raise the issues. + +PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin +[ -f /usr/local/etc/testforge/testforge.bash ] && . /usr/local/etc/testforge/testforge.bash + +. /usr/local/etc/local.d/local.bash + +MYID=$( id -u ) +[ $MYID -eq 0 ] || { ERROR $prog must be run as root $MYID ; exit 1 ; } +LOG_DIR=/usr/local/tmp + +ly=daily +errs=0 +warns=0 + +# sh /usr/local/bin/base_hourly.bash +LOG_DIR=/usr/local/tmp/$ly +[ -d "$LOG_DIR" ] || mkdir -p "$LOG_DIR" +ELOG=$LOG_DIR/E${prog}_${ly}$$.log +WLOG=$LOG_DIR/W${prog}_${ly}$$.log +OUT=$LOG_DIR/O${prog}_${ly}$$.log +rm -f $LOG_DIR/*${prog}_${ly}*.log + +if [ -f /var/log/dmesg.log ] ; then + grep 'IOMMU enabled' /var/log/dmesg.log || WARN NOT 'IOMMU enabled' | tee -a $WLOG +fi + +cp /dev/null /var/log/dirmngr.log +/usr/local/bin/base_gnupg_test.bash || ERROR $retval /usr/local/bin/base_gnupg_test.bash >> $WLOG +[ -d /etc/portage ] && \ + grep 'ERR 219 Server indicated a failure' /var/log/dirmngr.log >> $ELOG + +[ -f /usr/local/etc/testforge/testforge.bash ] && \ + . /usr/local/etc/testforge/testforge.bash + +[ -z "$UPTMP" ] && UPTMP=$PREFIX/tmp +if [ -d /etc/apt -a -d /o/Cache/Apt/Devuan/4 ] ; then + [ -z "$TESTF_UBUNTU16_VAR_APT_ARCHIVES" ] && \ + TESTF_UBUNTU16_VAR_APT_ARCHIVES=/o/Cache/Apt/Devuan/4 + [ -z "BOX_USER_NAME" ] && BOX_USER_NAME=devuan +else + [ -z "BOX_USER_NAME" ] && BOX_USER_NAME=vagrant +fi +if [ -d /o/Cache/Pip/ ] ; then + [ -z "$HOSTVMS_BOXUSER_PIP_CACHE" ] && \ + HOSTVMS_BOXUSER_PIP_CACHE=/o/Cache/Pip/ +fi + +# FixMe: bootstrap +elt=pip ; DBUG $elt +scripts="ansible ansible-playbook ansible-pull ansible-doc ansible-galaxy ansible-console ansible-connection ansible-vault" +for PYVER in 2 3 ; do + pfile=`python$PYVER.sh -c 'import pip; print(pip.__file__)'` + [ $? -eq 0 -a -f $pfile ] && continue + # /usr/local/sbin/bootstrap_pip.bash + pfile=`python$PYVER.sh -c 'import pip; print(pip.__file__)'` + [ $? -eq 0 -a -f $pfile ] || WARN pip $PYVER not installed - $pfile + for elt in $scripts ; do + [ -e $PREFIX/bin/$elt ] || { WARN installing $elt $PYVER ; } + done +done + +elt=doctest3 +if [ $MYID -ne 0 ] ; then + /var/local/bin/testforge_python_doctest3.bash \ + /var/local/share/doc/txt/base3.txt \ + > "$LOG_DIR"/$elt$$.log 2>&1 || ERROR $elt >> $ELOG +fi + +[ -f $WLOG ] && warns=$( wc -l $WLOG | cut -f 1 -d ' ' ) +[ $? -eq 0 -a $warns -ne 0 ] && \ + WARN "$prog $warns $ly $prog warnings in $WLOG" + +[ -f $ELOG ] && errs=$( wc -l $ELOG | cut -f 1 -d ' ' ) +[ $? -eq 0 -a $errs -ne 0 ] && \ + echo "ERROR: $prog $errs $ly $prog errors in $ELOG" && cat $ELOG + +[ $errs -eq 0 ] && \ + [ $warns -eq 0 ] && \ + INFO "$prog No $ly errors" && \ + rm -f $WLOG $ELOG $OUT + +exit $errs diff --git a/overlay/Linux/usr/local/bin/base_daily.exp b/overlay/Linux/usr/local/bin/base_daily.exp new file mode 100644 index 0000000..dd59f43 --- /dev/null +++ b/overlay/Linux/usr/local/bin/base_daily.exp @@ -0,0 +1,51 @@ +#!/usr/bin/expect -- +# -*- mode: tcl; tab-width: 8; encoding: utf-8-unix -*- + +set timeout 30 + +set KEY_ID 96D8BF6D +#? stty raw -echo + +spawn gpg --home /etc/portage/gnupg --edit-key $KEY_ID trust + +# unknown] (1). Gentoo ebuild repository signing key (Automated Signing Key) +# unknown] (2) Gentoo Portage Snapshot Signing Key (Automated Signing Key) + +## tsign +#expect "Really sign all user IDs? 
(y/N)?*" +#send_user "Sending y\n" +#send "y\n" +# tsign -> gpg: no default secret key: No secret key + +# trust +expect "Your decision?*" +send_user "Sending 4\n" +send "4\n" + +# No save is required for trust +expect "gpg>*" +send_user "Sending save\r" +send "save\r" +expect -re .+ { + exp_continue + } timeout { + exit 1 + } eof { + exit 0 + } "Key not changed so no update needed*" { + exit 0 + } + +expect "gpg>*" +send_user "Sending quit\r" +send "quit\r" + +expect -re .+ { + exp_continue + } timeout { + exit 1 + } eof { + exit 0 + } + +# expect -r .+ {send "\r"} diff --git a/overlay/Linux/usr/local/bin/base_daily.html b/overlay/Linux/usr/local/bin/base_daily.html new file mode 100644 index 0000000..48c9a77 --- /dev/null +++ b/overlay/Linux/usr/local/bin/base_daily.html @@ -0,0 +1,344 @@ + + + + + Release media signatures – Gentoo Linux + + + + + + + + + + + + + + + + + + + + + + + + +
+Release media signatures
+
+Our current releases are signed with either of these keys or any sub keys:
+
+Key Fingerprint                              Description                                                           Created     Expiry
+13EBBDBEDE7A12775DFDB1BABB572E0E2D182910     Gentoo Linux Release Engineering (Automated Weekly Release Key)      2009-08-25  2022-07-01
+DCD05B71EAB94199527F44ACDB6B8C1F96D8BF6D     Gentoo ebuild repository signing key (Automated Signing Key)         2011-11-25  2022-07-01
+EF9538C9E8E64311A52CDEDFA13D0EF1914E7A72     Gentoo repository mirrors (automated git signing key)                2018-05-28  2022-07-01
+D99EAC7379A850BCE47DA5F29E6438C817072058     Gentoo Linux Release Engineering (Gentoo Linux Release Signing Key)  2004-07-20  2022-01-01
+ABD00913019D6354BA1D9A132839FE0D796198B1     Gentoo Authority Key L1                                              2019-04-01  2022-07-01
+18F703D702B1B9591373148C55D3238EC050396E     Gentoo Authority Key L2 for Services                                 2019-04-01  2022-07-01
+2C13823B8237310FA213034930D132FF0FF50EEB     Gentoo Authority Key L2 for Developers                               2019-04-01  2022-07-01
+
+Verifying files
+
+To verify downloaded files are not tampered with, you need the .DIGESTS file matching your release and the matching key from the table above.
+
+Fetch the key:
+    gpg --keyserver hkps://keys.gentoo.org --recv-keys <key fingerprint>
+
+Alternatively, you can fetch a bundle containing all listed keys:
+    wget -O - https://qa-reports.gentoo.org/output/service-keys.gpg | gpg --import
+
+Verify the DIGESTS file:
+    gpg --verify <foo.DIGESTS.asc>
+
+Verify the download matches the digests. At least one of the following will exist:
+    sha512sum -c <foo.DIGESTS.asc>
+    sha256sum -c <foo.DIGESTS.asc>
+    sha1sum -c <foo.DIGESTS.asc>
+
+Detailed instructions are available in the Gentoo Handbook.
+ + + + + + + + + + diff --git a/overlay/Linux/usr/local/bin/base_get_if.bash b/overlay/Linux/usr/local/bin/base_get_if.bash new file mode 100755 index 0000000..9e5fe71 --- /dev/null +++ b/overlay/Linux/usr/local/bin/base_get_if.bash @@ -0,0 +1,10 @@ +#!/bin/sh +# -*- mode: sh; tab-width: 8; encoding: utf-8-unix -*- + +# remove + +prog=$( basename $0 .bash ) +PREFIX=/usr/local +ROLE=base + +exec bash /usr/local/bin/proxy_get_if.bash "$@" diff --git a/overlay/Linux/usr/local/bin/base_hourly.bash b/overlay/Linux/usr/local/bin/base_hourly.bash new file mode 100755 index 0000000..77d2efe --- /dev/null +++ b/overlay/Linux/usr/local/bin/base_hourly.bash @@ -0,0 +1,56 @@ +#!/bin/bash +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- + +PREFIX=/usr/local +ROLE=base +NOW=`date +%Y-%m-%d` +NOWM=`date +%Y-%m` +prog=$( basename $0 .bash ) + +. /usr/local/bin/usr_local_tput.bash || exit 2 + +[ -f /usr/local/etc/testforge/testforge.bash ] && . /usr/local/etc/testforge/testforge.bash +[ $( id -u ) -eq 0 ] || { ERROR $prog should be run as root ; exit 1 ; } + +ly=hourly +errs=0 +warns=0 + +elt=base +LOG_DIR=/usr/local/tmp +ELOG=$LOG_DIR/E${prog}_${ly}$$.log +WLOG=$LOG_DIR/W${prog}_${ly}$$.log +OUT=$LOG_DIR/O${prog}_${ly}$$.log +find $LOG_DIR/*${prog}_${ly}*.log -ctime +2 -delete + +ansible-inventory 2>> $WLOG || ERROR ansible-inventory $? >> $ELOG + +if ip route | grep -v ^def ; then + gpg-connect-agent --dirmngr 'keyserver --hosttable' /bye || exit 3$? + dirmngr-client -v --ping /tmp/P$$.log 2>&1 + if [ $? -ne 0 ] ; then + lelt=$( echo $elt | tr '[:upper:]' '[:lower:]' ) + if [ "$lelt" != "$elt" ] ; then + [ $PYVER = 2 ] && str="import $lelt;print $lelt.__file__" || \ + str="import $lelt;print($lelt.__file__)" + $PREFIX/bin/python$PYVER.sh -c $str >/tmp/P$$.log 2>&1 || \ + { rm -f /tmp/P$$.log ; continue ; } + fi + fi + + grep /usr/lib /tmp/P$$.log && DBUG $PYVER $elt $rest && continue + grep $PREFIX /tmp/P$$.log && INFO $PYVER $elt $rest && continue + cat /tmp/P$$.log && WARN $PYVER $elt $rest && continue + + done + +rm -f /tmp/P$$.log + +exit 0 diff --git a/overlay/Linux/usr/local/bin/base_pip_upgrade.bash b/overlay/Linux/usr/local/bin/base_pip_upgrade.bash new file mode 100755 index 0000000..35b18f5 --- /dev/null +++ b/overlay/Linux/usr/local/bin/base_pip_upgrade.bash @@ -0,0 +1,122 @@ +#!/bin/bash +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- + +# pip installs into /usr/local/bin +# export PATH=.:$PATH:/usr/local/bin + +prog=$( basename $0 .bash ) +ROLE=base +. /usr/local/bin/usr_local_tput.bash || exit 2 +PREFIX=/usr/local + +. /usr/local/etc/testforge/testforge.bash || exit 1 + +[ -d PREFIX=/var/local/var/log ] && \ + BASE_LOG_DIR=/var/local/var/log || \ + BASE_LOG_DIR=/tmp + +pyver=3 +inter=0 +verbose=3 + +usage() { + echo "Usage: $0 [OPTIONS] dirs-or-files" + echo + echo " -i | --inter=$inter - interactivly upgrade 0 or 1 [0]" + echo " -p | --pyver=$pyver - python version - 2 or 3" + echo " -v | --verbose=$verbose - verbosity 0 least 5 most" + echo + echo " -V | --version - print version of this script" + echo " -h | --help - print this help" +} + +exitWithErrMsg() { + retval=$1 + shift + echo "$1" 1>&2 + exit $retval +} + +SHORTOPTS="hVp:v:i:" +LONGOPTS="help,version,pyver:,verbose:,inter:" +PKGS= + +ARGS=$(getopt --options $SHORTOPTS --longoptions $LONGOPTS -- "$@") +[ $? != 0 ] && exitWithErrMsg 1 "Aborting." + +route | grep -q ^default || exitWithErrMsg 2 "We are not connected: Aborting." 
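+# Hypothetical invocations matching the usage() text above (the package names are
+# only examples, nothing in this role pins them):
+#   base_pip_upgrade.bash -p 3                         # report every outdated python3 package
+#   base_pip_upgrade.bash -p 3 -i 1 requests urllib3   # prompt before upgrading just these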
+ +eval set -- "$ARGS" + +while true; do + case "$1" in + -p|--pyver) + shift + pyver="$1" + ;; + -i|--inter) + shift + inter=1 + ;; + -v|--verbose) + shift + verbose="$1" + ;; + -h|--help) + usage + exit 0 + ;; + '--') + shift + PKGS="$*" + break + ;; + *) + break + ;; + esac + shift +done + +#echo $PKGS +if [[ $pyver =~ 2.* ]] ; then + LOG_DIR=$BASE_LOG_DIR/pip/$BASE_PYTHON2_MINOR + pip_exe=/usr/local/bin/pip2.sh + else + LOG_DIR=$BASE_LOG_DIR/testforge/pip/$BASE_PYTHON3_MINOR + pip_exe=/usr/local/bin/pip3.sh + fi + +cd /usr/local/bin +# --process-dependency-links +# this is missing many/most +# --format: invalid choice: 'legacy' (choose from 'columns', 'freeze', 'json') +$pip_exe list -o --format=columns --user | tee /tmp/$$.log +# pyface (Current: 4.5.2 Latest: 5.0.0 [sdist]) +grep 'wheel$\|sdist$' /tmp/$$.log | while read pkg current latest rest ; do + echo "INFO: $pkg from $current to $latest " + if [ -n "$PKGS" ] ; then + echo "$PKGS" | grep -v "grep" | grep -q "$pkg" || continue + fi + + # this is for the Msys distribution build from source + if [ -f ../src/$pkg.bash ] && grep VER= ../src/$pkg.bash ; then + [ -f ../src/$pkg.bash.old ] && WARN "$0 backup present $pkg.old" && continue + grep -q "^VER=\"$latest\"" ../src/$pkg.bash && \ + WARN "$0 $pkg already $latest" && continue + mv ../src/$pkg.bash ../src/$pkg.bash.old + sed -e "s/VER=$current/VER=$latest/" ../src/$pkg.bash < ../src/$pkg.bash.old + echo "INFO: package $pkg " + fi + # -u 2 + [ $inter -eq 0 ] && continue + + read -p "READ: Upgrade $pkg from $current to $latest? " yn + [ "$yn" = "q" ] && exit + [ "$yn" = "y" ] || continue + + $pip_exe $pkg $current $latest + done + +rm -f /tmp/$$.log +exit 0 diff --git a/overlay/Linux/usr/local/bin/base_sheebang_after_pip.bash b/overlay/Linux/usr/local/bin/base_sheebang_after_pip.bash new file mode 100755 index 0000000..b2e90c8 --- /dev/null +++ b/overlay/Linux/usr/local/bin/base_sheebang_after_pip.bash @@ -0,0 +1,61 @@ +#!/bin/sh +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- + +prog=$( basename $0 .bash ) +. /usr/local/bin/usr_local_tput.bash || exit 2 +PREFIX=/usr/local +ROLE=base + +[ -z "$BASE_PYTHON2_MINOR" ] && \ + BASE_PYTHON2_MINOR=$( python2 --version 2>&1| sed -e 's@^.* @@' -e 's@\.[0-9]*$@@' ) +[ -z "$BASE_PYTHON3_MINOR" ] && \ + BASE_PYTHON3_MINOR=$( python3 --version 2>&1| sed -e 's@^.* @@' -e 's@\.[0-9]*$@@' ) + +for PYTHON_MINOR in "$BASE_PYTHON2_MINOR" "$BASE_PYTHON3_MINOR" ; do + [ -z "$PYTHON_MINOR" ] && continue +if [ -z "$LIB" -a -d /usr/lib/python$PYTHON_MINOR/site-packages ] ; then + LIB=lib +elif [ -z "$LIB" -a -d /usr/lib64/python$PYTHON_MINOR/site-packages ] ; then + LIB=lib64 +elif [ -n "$LIB" -a ! -d /usr/$LIB/python$PYTHON_MINOR/site-packages ] ; then + ERROR LIB=$LIB but no /usr/$LIB/python$PYTHON_MINOR/site-packages +fi +done + +umask 0022 +# [ "$#" -eq 0 ] && set -- $PREFIX/bin + +# FixMe? /usr/local/bin too? I think not, except for ours? + +for prefix in /usr/local /var/local ; do + cd $prefix/bin || exit 1 + #? 
ls -1d * | grep -v '~' | xargs file | grep -i python | sed -e 's/:.*//'|while read file ; do + ls -1 | grep -v '~' | xargs file | grep script | sed -e 's/:.*//' | \ + while read file ; do + head -1 $file | grep -q python || continue + head -1 $file | grep -q $prefix/python..bash && continue + base=$( echo $file | sed -e 's/\.bash$//' ) + under=$( echo $prefix | sed -e 's/^.//' -e 's@/@_@g' ) + if [ -h /etc/python-exec/$base.conf ] ; then + link=$( readlink /etc/python-exec/$base.conf ) + if [ "$link" = python2.conf ] ; then + sed -f $prefix/share/sed/${under}_python2.sed -i $file + else + sed -f $prefix/share/sed/${under}_python3.sed -i $file + fi + else + sed -f $prefix/share/sed/${under}_python2.sed -i $file + sed -f $prefix/share/sed/${under}_python3.sed -i $file + fi + # echo $file + done + + # failsafe - Eberly - no longer active + for elt in $BASE_PYTHON2_MINOR $BASE_PYTHON3_MINOR ; do + [ -f $prefix/${LIB}/python$elt/site-packages/site.py ] + # WARN missing $prefix/${LIB}/python$elt/site-packages/site.py + done + +done + +exit 0 diff --git a/overlay/Linux/usr/local/bin/base_shutdown.bash b/overlay/Linux/usr/local/bin/base_shutdown.bash new file mode 100755 index 0000000..dea4bf9 --- /dev/null +++ b/overlay/Linux/usr/local/bin/base_shutdown.bash @@ -0,0 +1,67 @@ +#!/bin/sh +# -*- mode: sh; fill-column: 75; tab-width: 8; coding: utf-8-unix -*- + +prog=$( basename $0 .bash ) +ROLE=base +PREFIX=/usr/local + +. /usr/local/bin/usr_local_base.bash || exit 2 + +. ~/.bash_logout + +# these can hang unmounting partitions +pkill dirmngr +pkill bootlogd + +[ -x /var/local/bin/privacy_home_cleaner.bash ] && /var/local/bin/privacy_home_cleaner.bash + +[ -f ~/Makefile ] && grep -q ^stop: ~/Makefile && \ + { cd ~ ; make stop || exit 2 ; } + +local_base_umount () { + local mount + cd /mnt +mount=`mount` +for file in linux* ; do + echo $mount | grep -q " on /mnt/$file " || continue + echo /mnt/$file + umount -R /mnt/$file || exit 1 + done + +# not l - a b f d n u x i j k o q w e h z +for file in ? ; do + echo $mount | grep -q " on /mnt/$file " || continue + # echo /mnt/$file + umount /mnt/$file || WARN $prog error umounting /mnt/$file + done + umount -a +} + +local_base_umount || exit 3 + +# should be 0 +NUM=`losetup -a |grep -c -v home` +if [ $NUM -gt 0 ] ; then + losetup -a |grep -v home + echo losetup still mounted + exit 5 + fi + +sleep 10 +umount -a -t ntfs-3g + +# should be 1 +NUM=`ps ax | grep mount.ntfs-3g | grep -v grep | wc -l` +if [ $NUM -ge 1 ] ; then + ps ax | grep mount.ntfs-3g | grep -v grep + ERROR mount.ntfs-3g still running + exit 6 + fi + +INFO Calling shutdown + +if [ $# -lt 1 ] ; then + shutdown -r now + else + shutdown $* + fi diff --git a/overlay/Linux/usr/local/bin/base_testforge_perm.bash b/overlay/Linux/usr/local/bin/base_testforge_perm.bash new file mode 100755 index 0000000..fbb6c86 --- /dev/null +++ b/overlay/Linux/usr/local/bin/base_testforge_perm.bash @@ -0,0 +1,32 @@ +#!/bin/sh +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- + +# very dangerous +[ "$#" -gt 0 ] && ROOT=$1 || ROOT=/ +[ -d "$ROOT" ] || exit 1 + +ROLE=base + +cd $ROOT || exit 2 +GROUP=adm +[ -f /usr/local/etc/testforge/testforge.bash ] && . 
/usr/local/etc/testforge/testforge.bash +[ -n "$BOX_ALSO_GROUP" ] && GROUP=$BOX_ALSO_GROUP + +if [ -d ${ROOT}/var/local ] ; then + # allow + chgrp -R $GROUP ${ROOT}/var/local/{bin,data,lib64,src,net} + chmod -R g+rw,o-w ${ROOT}/var/local/{bin,data,lib64,src,net} + chmod a+x ${ROOT}/var/local/{bin,src,share/bash}/*sh + # if [ -d ${ROOT}/var/local/src/lynis ] ; then + + chgrp -R $GROUP ${ROOT}/var/local/{bin,data,lib64,src,net} + # forbid /var + chgrp -R root ${ROOT}/var/local/{etc,var,share} + chmod -R g-w,o-w ${ROOT}/var/local/{etc,var,share} + fi +if [ -d ${ROOT}/usr/local ] ; then + # forbid /usr but lib/python* will be created and allowed on install + chgrp -R root ${ROOT}/usr/local/ + chmod -R g-w,o-rw ${ROOT}/usr/local/ +fi +exit 0 diff --git a/overlay/Linux/usr/local/bin/base_wall.bash b/overlay/Linux/usr/local/bin/base_wall.bash new file mode 100755 index 0000000..f7f3f45 --- /dev/null +++ b/overlay/Linux/usr/local/bin/base_wall.bash @@ -0,0 +1,56 @@ +#!/bin/bash +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- +exit 0 +ROLE=base +usage=" +Usage: + wall [options] [message] + +Write a message to all users. + +Options: + -n, --nobanner do not print banner + -h, --help display this help and exit +" + +SHORT=nh +LONG=nobanner,help + +PARSED=$(getopt --options $SHORT --longoptions $LONG --name "$0" -- "$@") +if [[ $? -ne 0 ]]; then + echo "$usage" + exit 2 +fi +eval set -- "$PARSED" + +while true; do + case "$1" in + -n|--nobanner) + n=y + shift + ;; + -h|--help) + echo "$usage" + exit 0 + ;; + --) + shift + break + ;; + *) + exit 3 + ;; + esac +done + +ps -ef | grep " pts/" | awk '{print $6}' | sort -u > /tmp/terminals_$$.tmp +ps -ef | grep " tty" | awk '{print $6}' | sort -u | grep -v "pts" >> /tmp/terminals_$$.tmp +if [ "$n" ]; then + pre="" + post="" +else + pre="-e \nBroadcast message from $(whoami)@$(hostname) ($(ps ax | grep "^$$" | awk '{ print $2 }')) ($(date +"%a %b %d %H:%M:%S %Y")):\n\n" + post='\n' +fi +cat /tmp/terminals_$$.tmp | while read TTY_TO; do echo $pre"$*"$post | sudo tee /dev/$TTY_TO 1>/dev/null; done +rm /tmp/terminals_$$.tmp diff --git a/overlay/Linux/usr/local/bin/bash_to_bash.bash b/overlay/Linux/usr/local/bin/bash_to_bash.bash new file mode 100755 index 0000000..ed89da8 --- /dev/null +++ b/overlay/Linux/usr/local/bin/bash_to_bash.bash @@ -0,0 +1,9 @@ +#!/bin/sh +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- +# filter +ROLE=base +# extra cleanups to bash from yaml_to_bash +sed -e '/\[/s@, @ @g' \ + -e '/\[/s@\([^"]\)u"@\1"@g' -e "/\[/s@\([^']\)u'@\1'@g" \ + -e 's@="*\[\(.*\)\]@=(\1)@' -e "s@='*\[\(.*\)\]@=(\1)@" + diff --git a/overlay/Linux/usr/local/bin/fact_to_bash.bash b/overlay/Linux/usr/local/bin/fact_to_bash.bash new file mode 100755 index 0000000..c2f3014 --- /dev/null +++ b/overlay/Linux/usr/local/bin/fact_to_bash.bash @@ -0,0 +1,29 @@ +#!/bin/sh +# -*- mode: sh; fill-column: 75; tab-width: 8; coding: utf-8-unix -*- +# N.B.: creates /usr/local/etc/testforge/testforge.bash + +# filter or program +# should be -f VAR_LOCAL/share/sed/fact_to_bash.sed +# but /usr/local/etc/testforge/testforge.bash isnt created yet + +ROLE=base + +# wierd: doesnt work on Ubuntu - grep -F -e '=' $* | sed -e 's@^ *@@' | eval +grep '=' $* | sed \ +-e "s@u*'@@g" \ +-e 's@^ *@@' \ +-e 's@\[@"@' \ +-e 's@\]@"@' \ +-e 's@, @ @g' \ + > /tmp/$$.bash +. /tmp/$$.bash + + + +IFS='\t' sed -e 's/=/\t/' -e 's/"//g' /tmp/$$.bash |sort -u | while read key val ; do +# why filter these out? 
+# echo $key | grep -q 'SOCKS_PROXY\|NO_PROXY\|HTTP_PROXY\|HTTPS_PROXY\|GIT_' && continue + echo "export $key=\"$val\"" + done + +# rm /tmp/$$.bash diff --git a/overlay/Linux/usr/local/bin/fact_to_yaml.bash b/overlay/Linux/usr/local/bin/fact_to_yaml.bash new file mode 100755 index 0000000..1c37ce5 --- /dev/null +++ b/overlay/Linux/usr/local/bin/fact_to_yaml.bash @@ -0,0 +1,8 @@ +#!/bin/sh +# -*- mode: sh; fill-column: 75; tab-width: 8; coding: utf-8-unix -*- + +ROLE=base + +# filter or program +grep '=' "$*" \ +| sed -e "s@=@: @" -e "s@^ *@@" diff --git a/overlay/Linux/usr/local/bin/pip.sh b/overlay/Linux/usr/local/bin/pip.sh new file mode 100755 index 0000000..798f5ed --- /dev/null +++ b/overlay/Linux/usr/local/bin/pip.sh @@ -0,0 +1,108 @@ +#!/bin/bash +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- + +shopt -s nullglob || { ERROR use bash ; exit 1 ; } +. /usr/local/bin/usr_local_tput.bash || exit 2 +. /usr/local/bin/usr_local_base.bash || exit 3 + +ROLE=base +PREFIX=/usr/local +[ -z "$PYVER" ] && PYVER=3 +declare -a TARGET + +if [ -f /usr/local/etc/testforge/testforge.bash ] ; then + . /usr/local/etc/testforge/testforge.bash >/dev/null || exit 1 + P="BASE_PYTHON${PYVER}_MINOR" + PYTHON_MINOR="$(eval echo \$$P)" + fi + +[ -n "$PYTHON_MINOR" ] || \ + PYTHON_MINOR=$( python$PYVER --version 2>&1| sed -e 's@^.* @@' -e 's@\.[0-9]*$@@' ) +[ -z "$LIB" -a -d $PREFIX/lib/python$PYTHON_MINOR/site-packages ] && LIB=lib +[ -z "$LIB" -a -d $PREFIX/lib64/python$PYTHON_MINOR/site-packages ] && LIB=lib64 + +if [ "$#" -eq 0 ] || [[ "$*" =~ "--version" ]] || [[ "$*" =~ "--help" ]] ; then + $PREFIX/bin/python$PYVER.sh -m pip "$@" + exit $? +elif [ "$1" = 'html' ] ; then + wget -c -O - https://pypi.org/project/$2 2>/dev/null + exit $? +elif [ "$1" = 'lynx' ] ; then + lynx https://pypi.org/project/$2 + exit $? +elif [ "$1" = 'elinks' ] ; then + elinks https://pypi.org/project/$2 + exit $? +fi + +if [ -x $PREFIX/bin/base_check_site_py.bash ] ; then + $PREFIX/bin/base_check_site_py.bash $PYTHON_MINOR >/dev/null || exit $? 
+fi + +if [ -n "$PYTHONPATH" ] && [ -x $PREFIX/bin/base_clean_pythonpath.bash ] ; then + PYTHONPATH="$( $PREFIX/bin/base_clean_pythonpath.bash $PYTHON_MINOR $PYTHONPATH )" +fi + +# could from pip import download;print(download.__file__) +file=$PREFIX/$LIB/python$PYTHON_MINOR/site-packages/pip/download.py +if [ -f $file ] && grep -q 'if not check_path_owner' $file ; then + mv $file $file.dst + sed -e 's/if not check_path_owner/if False and not check_path_owner/' \ + > $file $file.dst + fi + +#DBUG $prog PYTHON_MINOR=$PYTHON_MINOR PYTHONPATH=$PYTHONPATH + +LARGS="$BASE_PIP_GLOBAL_ARGS" # --no-python-version-warning +if [ -f /usr/local/etc/ssl/cacert-testforge.pem ] ; then + [[ "$*" =~ "--cert" ]] || [[ $LARGS =~ "--cert" ]] || LARGS="--cert $PREFIX/etc/ssl/cacert-testforge.pem $LARGS" + fi +if [ -e $PREFIX/net/Cache/Pip ] ; then + [[ "$*" =~ "--cache-dir" ]] || [[ $LARGS =~ "--cache-dir" ]] || LARGS="--cache-dir $PREFIX/net/Cache/Pip $LARGS" +fi + +[[ "$*" =~ "--timeout" ]] || [[ $LARGS =~ "--timeout" ]] || LARGS="--timeout=30 $LARGS" +[[ "$*" =~ '--disable-pip-version-check' ]] || LARGS="--disable-pip-version-check $LARGS" +[[ "$*" =~ '--proxy' ]] || LARGS="$LARGS --proxy http://localhost:3128" + +MYID=$( id -u ) +if [ "$1" = 'uninstall' ] ; then + [ $MYID -eq 0 ] && ERROR $prog should not be run as root $MYID && exit 2 + +elif [ "$1" = 'install' ] ; then + [ $MYID -eq 0 ] && ERROR $prog should not be run as root $MYID && exit 2 + shift + RARGS="$RARGS --progress-bar=off" +# LARGS="$LARGS --python=/usr/local/bin/python$PYTHON_MINOR.sh" + /usr/local/bin/proxy_ping_test.bash wifi # || exit 3$? + # Can not combine '--user' and '--prefix' + if true ; then # >9.0.1 + if [[ $RARGS =~ "--prefix=$PREFIX" ]] ; then + : + else + [ $MYID -eq 0 ] && ERROR $prog should not be run as root $MYID && exit 2 + RARGS=" --prefix=$PREFIX $RARGS" + fi + else + # this is required, with the ~/.local symlinks, or it tries to uninstall from the system + [[ $RARGS =~ " --user" ]] || RARGS=" --user $RARGS" + # no quotes around the --install-option arg + [[ $RARGS =~ "--install-scripts" ]] || RARGS=" --install-option=--install-scripts=/usr/local/bin $RARGS" + [[ $RARGS =~ "--install-lib" ]] || RARGS=" --install-option=--install-lib=/usr/local/$LIB/python$PYTHON_MINOR/site-packages $RARGS" + fi +# if [ -d /etc/apt ] ; then # ! uname -a | grep Debian || +# [[ $RARGS =~ "--install-layout" ]] || RARGS=" --install-option=--install-layout=unix $RARGS" +# fi +#? [[ $RARGS =~ "--no-binary" ]] || RARGS="--no-binary :all: $RARGS" + # this prohibits installing .egg dirs but maybe that means no multi-version + [[ $RARGS =~ "--only-binary" ]] || RARGS="--only-binary :none: $RARGS" + ! $PREFIX/bin/python$PYVER.sh -m pip --help | grep -q upgrade-strategy || \ + [[ $RARGS =~ "--upgrade-strategy" ]] || RARGS="--upgrade-strategy only-if-needed $RARGS" + # require explicit package-by package installing - ? maybe only from ansible? 
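# A minimal sketch, assuming the defaults above and no testforge overrides
# (empty BASE_PIP_GLOBAL_ARGS, no local cert bundle or pip cache): after the
# flag juggling in this branch, "pip.sh install foo" typically ends up exec'ing
# something like
#   /usr/local/bin/python3.sh -W ignore::UserWarning -m pip \
#       --disable-pip-version-check --timeout=30 --proxy http://localhost:3128 \
#       install --upgrade-strategy only-if-needed --only-binary :none: \
#       --prefix=/usr/local --progress-bar=off foo
# i.e. the global LARGS go before the "install" subcommand and the per-install
# RARGS go after it.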
+ RARGS="install $RARGS" + export PYTHONPATH=/usr/local/$LIB/python$PYTHON_MINOR/site-packages +fi +TARGET=("$@") + +echo DBUG $prog $LARGS $RARGS "$@" +exec $PREFIX/bin/python$PYVER.sh -W ignore::UserWarning -m pip $LARGS $RARGS "$@" 2>&1 diff --git a/overlay/Linux/usr/local/bin/pip2.sh b/overlay/Linux/usr/local/bin/pip2.sh new file mode 100755 index 0000000..41751f7 --- /dev/null +++ b/overlay/Linux/usr/local/bin/pip2.sh @@ -0,0 +1,8 @@ +#!/bin/bash +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- + +shopt -s nullglob || { ERROR use bash ; exit 1 ; } +ROLE=base + +export PYVER=2 +exec /usr/local/bin/pip.sh "$@" diff --git a/overlay/Linux/usr/local/bin/pip3.11.sh b/overlay/Linux/usr/local/bin/pip3.11.sh new file mode 100644 index 0000000..7d563bd --- /dev/null +++ b/overlay/Linux/usr/local/bin/pip3.11.sh @@ -0,0 +1,108 @@ +#!/bin/bash +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- + +. /usr/local/bin/usr_local_tput.bash || exit 2 +. /usr/local/bin/usr_local_base.bash || exit 3 +shopt -s nullglob || { ERROR use bash ; exit 1 ; } + +ROLE=base +PREFIX=/usr/local +PYVER=3 +declare -a TARGET + +if [ -f /usr/local/etc/testforge/testforge.bash ] ; then + . /usr/local/etc/testforge/testforge.bash >/dev/null || exit 1 + P="BASE_PYTHON${PYVER}_MINOR" + PYTHON_MINOR="$(eval echo \$$P)" + fi + +[ -n "$PYTHON_MINOR" ] || \ + PYTHON_MINOR=$( python3.10 --version 2>&1| sed -e 's@^.* @@' -e 's@\.[0-9]*$@@' ) +PYTHON_MINOR=3.11 + +[ -z "$LIB" -a -d $PREFIX/lib/python$PYTHON_MINOR/site-packages ] && LIB=lib +[ -z "$LIB" -a -d $PREFIX/lib64/python$PYTHON_MINOR/site-packages ] && LIB=lib64 + +if [ "$#" -eq 0 ] || [[ "$*" =~ "--version" ]] || [[ "$*" =~ "--help" ]] ; then + $PREFIX/bin/python$PYVER.sh -m pip "$@" + exit $? +elif [ "$1" = 'html' ] ; then + wget -c -O - https://pypi.org/project/$2 2>/dev/null + exit $? +elif [ "$1" = 'lynx' ] ; then + lynx https://pypi.org/project/$2 + exit $? +elif [ "$1" = 'elinks' ] ; then + elinks https://pypi.org/project/$2 + exit $? +fi + +if [ -x $PREFIX/bin/base_check_site_py.bash ] ; then + $PREFIX/bin/base_check_site_py.bash $PYTHON_MINOR >/dev/null || exit $? 
+fi + +if [ -n "$PYTHONPATH" ] && [ -x $PREFIX/bin/base_clean_pythonpath.bash ] ; then + PYTHONPATH="$( $PREFIX/bin/base_clean_pythonpath.bash $PYTHON_MINOR $PYTHONPATH )" +fi + +# could from pip import download;print(download.__file__) +file=$PREFIX/$LIB/python$PYTHON_MINOR/site-packages/pip/download.py +if [ -f $file ] && grep -q 'if not check_path_owner' $file ; then + mv $file $file.dst + sed -e 's/if not check_path_owner/if False and not check_path_owner/' \ + > $file $file.dst + fi + +#DBUG $prog PYTHON_MINOR=$PYTHON_MINOR PYTHONPATH=$PYTHONPATH + +LARGS="$BASE_PIP_GLOBAL_ARGS" # --no-python-version-warning +if [ -f /usr/local/etc/ssl/cacert-testforge.pem ] ; then + [[ "$*" =~ "--cert" ]] || [[ $LARGS =~ "--cert" ]] || LARGS="--cert $PREFIX/etc/ssl/cacert-testforge.pem $LARGS" + fi +if [ -e $PREFIX/net/Cache/Pip ] ; then + [[ "$*" =~ "--cache-dir" ]] || [[ $LARGS =~ "--cache-dir" ]] || LARGS="--cache-dir $PREFIX/net/Cache/Pip $LARGS" +fi + +[[ "$*" =~ "--timeout" ]] || [[ $LARGS =~ "--timeout" ]] || LARGS="--timeout=30 $LARGS" +[[ "$*" =~ '--disable-pip-version-check' ]] || LARGS="--disable-pip-version-check $LARGS" +[[ "$*" =~ '--proxy' ]] || LARGS="$LARGS --proxy localhost:3128" + +MYID=$( id -u ) +if [ "$1" = 'uninstall' ] ; then + [ $MYID -eq 0 ] && ERROR $prog should not be run as root $MYID && exit 2 + +elif [ "$1" = 'install' ] ; then + shift + /usr/local/bin/proxy_ping_test.bash wifi # || exit 3$? + RARGS="$BASE_PIP_INSTALL_ARGS" + # Can not combine '--user' and '--prefix' + if true ; then # >9.0.1 + if [[ $RARGS =~ "--prefix=$PREFIX" ]] ; then + : + else + [ $MYID -eq 0 ] && ERROR $prog should not be run as root $MYID && exit 2 + RARGS=" --prefix=$PREFIX $RARGS" + fi + else + # this is required, with the ~/.local symlinks, or it tries to uninstall from the system + [[ $RARGS =~ " --user" ]] || RARGS=" --user $RARGS" + # no quotes around the --install-option arg + [[ $RARGS =~ "--install-scripts" ]] || RARGS=" --install-option=--install-scripts=/usr/local/bin $RARGS" + [[ $RARGS =~ "--install-lib" ]] || RARGS=" --install-option=--install-lib=/usr/local/$LIB/python$PYTHON_MINOR/site-packages $RARGS" + fi +# if [ -d /etc/apt ] ; then # ! uname -a | grep Debian || +# [[ $RARGS =~ "--install-layout" ]] || RARGS=" --install-option=--install-layout=unix $RARGS" +# fi +#? [[ $RARGS =~ "--no-binary" ]] || RARGS="--no-binary :all: $RARGS" + # this prohibits installing .egg dirs but maybe that means no multi-version + [[ $RARGS =~ "--only-binary" ]] || RARGS="--only-binary :none: $RARGS" + ! $PREFIX/bin/python$PYVER.sh -m pip --help | grep -q upgrade-strategy || \ + [[ $RARGS =~ "--upgrade-strategy" ]] || RARGS="--upgrade-strategy only-if-needed $RARGS" + # require explicit package-by package installing - ? maybe only from ansible? 
+ RARGS="install $RARGS" + export PYTHONPATH=/usr/local/$LIB/python$PYTHON_MINOR/site-packages +fi +TARGET=("$@") + +echo DBUG $prog $LARGS $RARGS "$@" +exec $PREFIX/bin/python$PYVER.sh -W ignore::UserWarning -m pip $LARGS $RARGS "$@" 2>&1 diff --git a/overlay/Linux/usr/local/bin/pip3.sh b/overlay/Linux/usr/local/bin/pip3.sh new file mode 100755 index 0000000..9fc7f26 --- /dev/null +++ b/overlay/Linux/usr/local/bin/pip3.sh @@ -0,0 +1,8 @@ +#!/bin/bash +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- + +shopt -s nullglob || { ERROR use bash ; exit 1 ; } +ROLE=base + +export PYVER=3 +exec /usr/local/bin/pip.sh "$@" diff --git a/overlay/Linux/usr/local/bin/proxy_ping_test.bash b/overlay/Linux/usr/local/bin/proxy_ping_test.bash new file mode 100755 index 0000000..f0fa7d4 --- /dev/null +++ b/overlay/Linux/usr/local/bin/proxy_ping_test.bash @@ -0,0 +1,974 @@ +#!/bin/bash +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- + +. /usr/local/bin/usr_local_tput.bash || exit 2 +PREFIX=/usr/local +ROLE=proxy +PYVER=3 + +# DEBUG=1 + +. /usr/local/bin/proxy_ping_lib.bash || \ + { ERROR loading /usr/local/bin/proxy_ping_lib.bash ; exit 6; } +PL=/usr/local/bin/proxy_libvirt_lib.bash +declare -a tests + +which traceroute 2>/dev/null >/dev/null && HAVE_TRACEROUTE=1 || HAVE_TRACEROUTE=0 +which dig 2>/dev/null >/dev/null && HAVE_DIG=1 || HAVE_DIG=0 +which nslookup 2>/dev/null >/dev/null && HAVE_NSLOOKUP=1 || HAVE_NSLOOKUP=0 +which tor-resolve 2>/dev/null >/dev/null && HAVE_TOR_RESOLVE=1 || HAVE_TOR_RESOLVE=0 + +[ -z "$prog" ] || prog=proxy_ping_test +proxy_ping_get_socks +[ -z "$SOCKS_HOST" ] && SOCKS_HOST=127.0.0.1 +[ -z "$SOCKS_PORT" ] && SOCKS_PORT=9050 +[ -z "$SOCKS_DNS" ] && SOCKS_DNS=9053 +HTTPS_PORT=9128 +HTTPS_HOST=127.0.0.1 +proxy_ping_get_https +[ -z "$HTTPS_HOST" ] && HTTPS_HOST=127.0.0.1 +HTTP_PORT=3128 +HTTP_PROXY_HOST=127.0.0.1 +proxy_ping_get_http +[ -z "$HTTP_HOST" ] && HTTP_HOST=127.0.0.1 + +[ -f $PREFIX/etc/testforge/testforge.bash ] && \ + . /usr/local/etc/testforge/testforge.bash >/dev/null || exit 1 + +P="BASE_PYTHON${PYVER}_MINOR" +PYTHON_MINOR="$(eval echo \$$P)" +[ -n "$PYTHON_MINOR" ] || \ + PYTHON_MINOR=$( python$PYVER --version 2>&1| sed -e 's@^.* @@' -e 's@\.[0-9]*$@@' ) +[ -n "$PYTHON_MINOR" ] || exit 4 + +if [ -z "$LIB" -a -d /usr/lib/python$PYTHON_MINOR ] ; then + LIB=lib +elif [ -z "$LIB" -a -d /usr/lib64/python$PYTHON_MINOR ] ; then + LIB=lib64 +elif [ -n "$LIB" -a ! -d /usr/$LIB/python$PYTHON_MINOR ] ; then +#? ERROR LIB=$LIB but no /usr/$LIB/python$PYTHON_MINOR + exit 5 +fi + +THOPS=40 +NEEDED_BINS="ping traceroute nmap dig nslookup tor-resolve" +NEEDED_SCRIPTS=" +/usr/local/bin/proxy_ping_lib.bash +/usr/local/bin/proxy_ping_test.bash +" + +grep -q Debian /etc/os-release +DEBIAN=$? 
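# grep -q exits 0 on a match, so DEBIAN=0 here means /etc/os-release mentions
# Debian; several tests below use "[ $DEBIAN -eq 0 ] && continue" to skip
# curl/socks checks that behave differently on such systems. For example:
#   grep -q Debian /etc/os-release ; echo $?    # prints 0 on a Debian box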
+TIMEOUT=30
+[ -n "$GATEW_DOM" ] || GATEW_DOM="$( proxy_testforge_get_gateway_dom )"
+[ -n "$GATEW_DOM" ] || GATEW_DOM="Whonix-Gateway"
+
+DNS_HOST1="208.67.220.220"
+DNS_HOST2="8.8.8.8"
+[ -n "$DNS_TARGET" ] || DNS_TARGET=www.whatismypublicip.com # 108.160.151.39
+[ -n "$HTTP_TARGET" ] || HTTP_TARGET=www.whatismypublicip.com # 108.160.151.39
+HTTP_TARGET=www.whatismypublicip.com
+
+# time.nist.gov 132.163.97.3
+NTP_HOST1=132.163.97.3
+# pool.ntp.org 78.46.53.2
+NTP_HOST2=78.46.53.2
+# --no-check-certificate
+WGET="wget --tries=1 --max-redirect=0 --timeout=$TIMEOUT -O /dev/null"
+CURL="curl -o /dev/null $CURL_ARGS"
+SCURL="/usr/local/bin/scurl.bash --output /dev/null"
+NSL='nslookup -querytype=A -debug'
+NETS='netstat -nl4e'
+ALL=""
+
+[ -z "$USER" ] && USER=$(id -un )
+[ $USER = root ] && DMESG_LINES=1 || DMESG_LINES=0
+[ -n "$PROXY_WLAN" ] || PROXY_WLAN=`proxy_ping_get_wlan`
+# fixme - required
+PROXY_WLAN=$( echo $PROXY_WLAN | grep ^wlan |sed -e 's/:.*//' )
+
+[ -n "$PROXY_WLAN_GW" ] || PROXY_WLAN_GW=`proxy_ping_get_wlan_gw`
+# fixme - required
+PROXY_WLAN_GW=$( echo $PROXY_WLAN_GW | grep ^wlan |sed -e 's/:.*//' )
+MODE=$( proxy_ping_mode )
+USAGE="$prog without arguments tests the current MODE=$MODE,
+or 0 to list the tests by number,
+or one or more of the groups:
+
+"
+
+DNS_HOST=$SOCKS_HOST
+[ -z "$PRIV_BIN_OWNER" ] && PRIV_BIN_OWNER=bin
+[ -z "$PRIV_BIN_GID" ] && PRIV_BIN_GID=$( grep ^$PRIV_BIN_OWNER /etc/passwd|cut -d: -f 4 )
+
+## proxy_test_netstat_dns
+proxy_test_netstat_dns () { DBUG proxy_test_netstat_dns $* ;
+ $NETS | grep -q ":53"
+ retval=$?
+ [ $retval -eq 0 ] && return 0
+ ERROR $prog test=$ARG "${tests[$ARG]}" dns not running
+ [ -z "$ALL" ] && exit $ARG$retval || return 1
+}
+
+## proxy_test_traceroute_icmp_gw
+proxy_test_traceroute_icmp_gw () { DBUG proxy_test_traceroute_icmp_gw $* ;
+ [ -n "$PROXY_WLAN_GW" ] || PROXY_WLAN_GW=`proxy_ping_get_wlan_gw` || return 1
+ traceroute --icmp $PROXY_WLAN_GW
+ retval=$?
+ [ $retval -eq 0 ] && return 0
+ ERROR $prog test=$ARG "${tests[$ARG]}" retval=$retval traceroute --icmp $PROXY_WLAN_GW
+ [ -z "$ALL" ] && exit $ARG$retval || return 1
+ # works
+ GREP="-i icmp"
+ return 0
+}
+
+## proxy_test_dig_direct
+proxy_test_dig_direct () { DBUG proxy_test_dig_direct $* ;
+
+ dig @$DNS_HOST1 pool.ntp.org +timeout=$TIMEOUT >/dev/null
+ retval=$?
+ [ $retval -eq 0 ] && return 0
+ ERROR $prog test=$ARG "${tests[$ARG]}" retval=$retval dig @$DNS_HOST1
+ [ -z "$ALL" ] && exit $ARG$retval || return 1
+
+ INFO $prog test=$ARG "${tests[$ARG]}" dig @$DNS_HOST1
+ # works
+ GREP="53"
+ return 0
+}
+
+## proxy_test_curl_firewall_bin
+proxy_test_curl_firewall_bin () { DBUG proxy_test_curl_firewall_bin $* ;
+ su -c "$CURL -k --noproxy '*' https://$HTTP_TARGET" -s /bin/sh $PRIV_BIN_OWNER >/dev/null
+ retval=$?
+ [ $retval -eq 0 ] && return 0
+ ERROR $prog test=$ARG "${tests[$ARG]}" retval=$retval \
+ su -c "$CURL -k --noproxy '*' https://$HTTP_TARGET" -s /bin/sh $PRIV_BIN_OWNER
+ proxy_iptables_save|tail|grep PTABLES_filter_DROP-o
+ [ -z "$ALL" ] && exit $ARG$retval || return $retval
+}
+
+## proxy_ping_curl
+proxy_ping_curl () { DBUG proxy_ping_curl $* ;
+ local retval
+ timeout -k $TIMEOUT $TIMEOUT $CURL "$@"
+ retval=$?
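# timeout -k sends SIGKILL $TIMEOUT seconds after the initial TERM if curl hangs.
# curl exit code 35 is its generic SSL-connect-error code; the check below
# tolerates it alongside 0 (see the note about the spurious "end of file" failure).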
+ # "DEBUG: wierd failure curl: (35) Encountered end of file" + [ $retval -eq 0 -o $retval -eq 35 ] && return 0 + return $retval +} + +## proxy_ping_make_help +proxy_ping_make_help () { + grep 'tests\[[0-9][0-9]*\]=' /usr/local/bin/proxy_ping_test.bash \ + > /tmp/proxy_ping_test.hlp + return 0 +} + +## proxy_ping_test_virbr +proxy_ping_test_virbr () { + local n=$1 + [ -z "$n" ] && n=1 + [ -z "$CONN" ] || proxy_whonix_get_conn + [ "$CONN" = guest ] && return 0 + [ -e /proc/sys/net/ipv4/conf/virbr$n ] || return 0 + proxy_ifconfig virbr$n >/dev/null && return 0 + return 0 +} + +## proxy_ping_broken +proxy_ping_broken () { DBUG proxy_ping_broken PROXY_WLAN=$PROXY_WLAN $* ; + # 0 is true + local a=$MODE + if [ "$a" = vda -o "$a" = ws ]; then + # grep 10.152.152.10 /etc/resolv.conf && + PING_BROKEN=0 + return 0 + elif [ "$a" = gateway ]; then + PING_BROKEN=0 + return 0 + elif [ -z "$PROXY_WLAN_GW" ] ; then + PING_BROKEN=0 + return 0 + fi + + [ -n "$PING_BROKEN" ] && return $PING_BROKEN + + DBUG $prog proxy_ping_mode=$a PROXY_WLAN=$PROXY_WLAN PROXY_WLAN_GW=$PROXY_WLAN_GW + ping -4 -I $PROXY_WLAN -c 1 -W $TIMEOUT $PROXY_WLAN_GW # 10.16.238.1 + if [ $? -ne 0 ] ; then + PING_BROKEN=0 + else + PING_BROKEN=1 + fi + return $PING_BROKEN +} + +## proxy_do_ping +proxy_do_ping () { DBUG proxy_do_ping $* ; + proxy_route_check || { ERROR $prog route not connected ; return 1$? ; } + + proxy_ping_broken && return 0 + + [ -n "$PROXY_WLAN" ] || PROXY_WLAN=`proxy_get_if` || { + ERROR $prog unable to get wlan $? ; return 2 ; + } + + ping -4 -I $PROXY_WLAN -c 1 -W $TIMEOUT $DNS_HOST2 >/tmp/P$$.log 2>&1 + retval=$? + if [ $retval -eq 1 ] ; then + # false negatives + sleep 4 + ping -4 -I $PROXY_WLAN -c 1 -W $TIMEOUT $DNS_HOST2 >/tmp/P$$.log 2>&1 + retval=$? + fi + [ $retval -lt 1 ] || { + ERROR $prog do_ping $PROXY_WLAN retval=$retval + rm /tmp/P$$.log + PING_BROKEN=0 + return 3$retval + } + grep -q ' 0% ' /tmp/P$$.log || \ + { ERROR $prog retval=$? test=$1 ping retval=$retval ; rm /tmp/P$$.log ; return 4 ; } + PING=1 + grep 'packet\|bytes from' /tmp/P$$.log + rm /tmp/P$$.log + return 0 +} + +proxy_run_as_root () { DBUG proxy_run_as_root $* ; + [ $( id -u ) -eq 0 ] && return 0 + ERROR must be root + [ -z "$ALL" ] && exit 9 + return 1 +} + +## proxy_test_pretests +proxy_test_pretests () { + if [ "$1" = panic ] ; then + : dont ping on panic + proxy_ping_broken || proxy_do_ping || \ + { WARN ping failed for panic so skipping ; exit 0 ; } + elif [ "$1" = direct -o "$1" = gateway -o "$1" = vda -o "$1" = kick ] ; then + proxy_route_test || { ERROR $prog route not connected ; exit 1$? ; } + proxy_ping_broken || proxy_do_ping || exit 3$? + proxy_ping_test_resolv $MODE ||\ + { WARN $prog proxy_ping_test_resolv=$? 'echo nameserver 127.0.0.1 > /etc/resolv.conf' ; exit 4 ; } + proxy_ping_firewall_start || { ERROR "proxy_ping_firewall_start ret=$?" ; exit 5 ; } + elif [ "$1" = nat ] ; then + proxy_route_test || { ERROR $prog route not connected ; exit 1$? ; } + else + proxy_do_ping || exit 4$? + proxy_ping_test_resolv $MODE || \ + { WARN "$prog proxy_ping_test_resolv=$? 
/etc/resolv.conf.$dire" MODE=$MODE + exit 4 ; } + + fi + return 0 +} + +## proxy_test_help_args +proxy_test_help_args () { + declare -a ret=() + ret=( $(grep " -.* $1 " /tmp/proxy_ping_test.hlp | \ + sed -e 's/.=.*//' -e 's/.*tests.//') ) + echo "${ret[@]}" + return 0 +} + +ALL=0 +## proxy_ping_test_set_args +proxy_ping_test_set_args () { + local args="$@" + local val="$@" + declare -a aret=() + rm -f /tmp/proxy_ping_test.hlp + [ -f /tmp/proxy_ping_test.hlp ] || proxy_ping_make_help +## to_tor - tor with the firewall host side client setup tor server - call tor,dns,ntp in addition +[ "$1" = to_tor -o "$1" = test_tor -o "$1" = test_to ] && + aret=( 6 13 16 ) && \ + ! proxy_ping_test_env && WARN to_tor and no proxy in env - use noenv + +## vda - through the Gateway with the firewall - also polipo,panic - uses env +[ "$1" = vda ] && + aret=( 35 3 20 ) # +## tor - tor with the firewall to test the host side tor server - call to_tor,dns,ntp in addition +[ "$1" = tor ] && + aret=( 21 30 20 4 5 36 3 ) +## kick - open firewall with tor running - call dns,polipo +tor in addition +[ "$1" = kick -o "$1" = host ] && + aret=( 24 31 13 16 6 )# 30 24 31 6 13 16 +## gateway - on the Gateway, trans firewall with tor running - call dns in addition +[ "$1" = gateway ] && + aret=( 23 25 4 5 30 24 17 3 21 ) # 31 6 16 + +# aliases +[ "$1" = "$SOCKS_PORT" ] && set -- socks +[ "$1" = "$HTTP_PORT" ] && set -- http +[ "$1" = "$HTTPS_PORT" ] && set -- https +[ "$1" = "53" ] && set -- dns +[ "$1" = "9053" ] && set -- tordns + +[ "$1" = scan ] && set -- iwlist +[ "$1" = panic ] && set -- firewall +[ "$1" = tor ] && set -- torhost +[ "$1" = to_gateway ] && set -- whonix +[ "$1" = from_tor ] && set -- whonix +[ "$1" = from_gateway ] && set -- gateway +[ "$1" = traceroute ] && set -- = trace +[ "$1" = connected ] && set -- wifi +[ "$1" = clear ] && set -- direct + +# scenarios - modes: nat selektor +## nat - through the Gateway via the nat +[ "$1" = nat ] && \ + set -- ping dns socks http https tordns firefail libvirtguest +# wifi? 
+[ "$1" = whonix ] && \ + set -- ping tordns dns socks http https torhost tordns firefail gw +[ "$1" = tor ] && \ + set -- ping tordns dns trace socks http https torhost tordns firefail nmap gw +[ "$1" = selektor ] && \ + set -- ping tordns dns trace socks http https torhost tordns firefail nmap gw +[ "$1" = direct -o "$1" = '' ] && \ + set -- ping dns trace nmap gw + +## all - all tests not stopping on the first error +[ "$1" = all ] && ALL=1 +# aret="${#tests[@]}" + +## gw - test if we are connected to the gateway +## torhost - running tor with the firewall +## env - from the cmdline with a properly setup env +## firefail - test the proxy without env vars to expect failure +## http - assumes torhost or whonix and env setup +## https - assumes torhost or whonix and env setup +## socks - assumes torhost or whonix and env setup +## ping - connected routed test the ping to DNS hosts +## ntp - ntpdate through the firewall +## nmap - nmap sgid through the firewall - does not assume env +## iwlist - wlan scan +## firewall - test that the firewall blocks +## virbr1 - assumes tor or whonix +## gateway - ssh to the whonix gateway +## trace - traceroute to DNSHOST - icmp is allowed by the firewall, except on vda +## wifi - test if we are connected - call scan in addition +## libvirthost - hosting a libvirt container +## libvirtguest - in a libvirt container +## tordns - test 9053 for dns using tor-resolve +## dns - dns using tor or the gateway, with the firewall - does not assume env +## whonix - whonix to the Gateway with the firewall - also panic - not assume env +## whonix - whonix gateway host side client setup with the firewall was from_to## direct - assume no firewall and no proxy - but may work depend on env +r + for elt in "$@" ; do + if [ "$elt" = gw -o "$elt" = '' -o "$elt" = env -o \ + "$elt" = https -o "$elt" = http -o "$elt" = socks -o "$elt" = dns -o \ + "$elt" = torhost -o "$elt" = tordns -o "$elt" = whonix -o \ + "$elt" = libvirthost -o "$elt" = libvirtguest -o "$elt" = virbr1 -o \ + "$elt" = ping -o "$elt" = trace -o "$elt" = ntp -o "$elt" = nmap -o \ + "$elt" = iwlist -o "$elt" = firefail -o "$elt" = direct -o \ + "$elt" = trace -o "$elt" = wifi -o "$elt" = '' -o "$elt" = '' \ + ] ; then + aret+=( `proxy_test_help_args $elt` ) + else + WARN unrecognized: $elt >&2 + fi + done + + DBUG "${aret[@]}" >&2 + echo "${aret[@]}" + return 0 +} + +# -I $PROXY_WLAN -c 1 $DNS_HOST2 +if [ "$#" = 0 ] ; then + # default to mode + set -- $MODE + fi +if [ $1 = '-h' -o $1 = '--help' ] ; then + echo USAGE: $USAGE | sed -e 's/[0-9][0-9]*)/\n&/g' + grep '^## [a-oq-z]' $0 | sed -e 's/^## / /' + exit 0 + elif [ "$1" = 0 ] ; then + INFO $prog PROXY_WLAN=$PROXY_WLAN MODE=$MODE + echo 0 help /tmp/proxy_ping_test.hlp + [ -f /tmp/proxy_ping_test.hlp ] || proxy_ping_make_help + . /tmp/proxy_ping_test.hlp + for elt in "${!tests[@]}" ; do + echo $elt "${tests[$elt]}" + done + exit 0 + elif [[ $1 =~ ^[0-9] ]] ; then + : passthrough + else + set -- `proxy_ping_test_set_args "$@"` + DBUG running tests numbered "$@" + fi +proxy_route_test || { ERROR $prog route not connected ; exit 1$? ; } + +proxy_test_pretests "$1" + +# https://stackoverflow.com/questions/8290046/icmp-sockets-linux/20105379#20105379 +if [ $( id -u ) -eq 0 ] ; then + proxy_ping_chattr + fi + +DBUG $prog PROXY_WLAN=$PROXY_WLAN MODE=$MODE $* +# $( sysctl net.ipv4.ping_group_range ) + +# proxy_iptables_save|grep 216 + +while [ "$#" -gt 0 ] ; do + # DBUG $prog $1 + ARG=$1 ; shift + + GREP="" + if [ -z "$ARG" ] ; then + continue + + elif ! 
[ "$ARG" -ge 0 ] ; then + ERROR $prog called with an unrecognized argument $ARG from $0 + exit 9 + + elif [ $ARG -le 0 ] ; then + # do the ping and resov.conf + true + + elif [ $ARG -eq 1 ] ; then + tests[1]="wget_https_as_user wget ${HTTPS_PORT} - https " + [ -n "$https_proxy" ] && LARGS="" || \ + LARGS="env https_proxy=https://${HTTPS_HOST}:${HTTPS_PORT}" + $LARGS $WGET https://$HTTP_TARGET + retval=$? + if [ $retval -eq 8 -o $retval -eq 0 ] ; then + INFO $prog test=$ARG "${tests[$ARG]}" + else + ERROR $prog test=$ARG "${tests[$ARG]}" retval=$retval test=$ARG + [ -z "$ALL" ] && continue + fi + # works with fix + GREP="${HTTPS_PORT}" + + elif [ $ARG -eq 2 ] ; then + [ -n "$https_proxy" ] && LARGS="--proxy $https_proxy" || \ + LARGS="--proxy https://${HTTPS_HOST}:${HTTPS_PORT}" + tests[2]="curl_https_as_user curl $LARGS https://$HTTP_TARGET - https " + proxy_ping_curl $LARGS https://$HTTP_TARGET >/dev/null || { \ + retval=$? + ERROR $prog test=$ARG "${tests[$ARG]}" retval=$retval curl $LARGS https://$HTTP_TARGET + [ -z "$ALL" ] && exit $ARG$retval || continue + } + INFO $prog test=$ARG "${tests[$ARG]}" + # works with fix + GREP="${HTTPS_PORT}" + + elif [ $ARG -eq 3 ] ; then + tests[3]="curl_socks_virbr1_as_user $SOCKS_HOST $SOCKS_PORT - torhost " + # proxy_dest_port_wlan_config || { ERROR DEST=$DEST ; continue ; } + + # curl: (4) A requested feature, protocol or option was not found built-in in this libcurl due to a build-time decision + [ $DEBIAN -eq 0 ] && continue + + [ -z "$socks_proxy" ] && socks_proxy=socks5h://${SOCKS_HOST}:$SOCKS_PORT + if [ $MODE = whonix ] ; then + ssh -o ForwardX11=no user@10.0.2.15 netstat -nl4e| grep 15:$SOCKS_PORT || { + retval=$? + ERROR ssh -o ForwardX11=no user@10.0.2.15 netstat + [ -z "$ALL" ] && exit $ARG$retval || continue ; + } + socks_proxy=socks5h://${SOCKS_HOST}:$SOCKS_PORT + proxy_ping_curl -x $socks_proxy \ + --interface virbr1 n--dns-interface virbr1 https://$HTTP_TARGET >/dev/null || { + retval=$? + ERROR $prog test=$ARG "${tests[$ARG]}" retval=$retval curl -x $socks_proxy --interface virbr1 --dns-interface virbr1 https://$HTTP_TARGET + [ -z "$ALL" ] && exit $ARG$retval || continue + } + else + socks_proxy=socks5h://${SOCKS_HOST}:$SOCKS_PORT + proxy_ping_curl -x $socks_proxy https://$HTTP_TARGET >/dev/null \ + || { retval=$? ; ERROR $prog test=$ARG "${tests[$ARG]}" retval=$retval curl ${SOCKS_HOST} $SOCKS_PORT + [ -z "$ALL" ] && exit $ARG$retval || continue ; } + fi + + INFO $prog test=$ARG "${tests[$ARG]}" + # works with user/pass + GREP="$SOCKS_PORT" + + elif [ $ARG -eq 4 ] ; then + tests[4]="dig_socks_through_as_user @${SOCKS_HOST} -p $SOCKS_DNS www.whatismypublicip.com - tordns " + [ $HAVE_DIG = 1 ] || continue + if [ $MODE = whonix ] ; then + ssh -o ForwardX11=no user@10.0.2.15 netstat -nl4e | grep 15:$SOCKS_DNS + fi + dig @${SOCKS_HOST} -p $SOCKS_DNS www.whatismypublicip.com +timeout=$TIMEOUT >/dev/null || { \ + retval=$? + WARN $prog test=$ARG "${tests[$ARG]}" retval=$retval dig @${SOCKS_HOST} -p $SOCKS_DNS www.whatismypublicip.com + [ -z "$ALL" ] && exit $ARG$retval || continue + } + INFO $prog test=$ARG "${tests[$ARG]}" + # works with fix + GREP="$SOCKS_DNS" + + elif [ $ARG -eq 5 ] ; then + tests[5]="nslookup_socks_as_user - tordns " + [ $HAVE_NSLOOKUP = 1 ] || continue + desc="$NSL -port=$SOCKS_DNS www.whatismypublicip.com ${DNS_HOST}" + $desc >/dev/null || { \ + retval=$? 
+ WARN $prog test=$ARG "${tests[$ARG]}" retval=$retval $desc + [ -z "$ALL" ] && exit $ARG$retval || continue + } + INFO $prog test=$ARG "${tests[$ARG]}" $desc + # works with fix + GREP="$SOCKS_DNS" + + elif [ $ARG -eq 6 ] ; then + proxy=`proxy_ping_get_https` + desc="curl --proxy http://${proxy}" + tests[6]="curl_https_as_user - https " + proxy_ping_curl --proxy http://${proxy} \ + --proxy-insecure https://$HTTP_TARGET || { \ + retval=$? + WARN $prog test=$ARG "${tests[$ARG]}" retval=$retval $desc + [ -z "$ALL" ] && exit $ARG$retval || continue + } + INFO $prog test=$ARG "${tests[$ARG]}" $desc + # works + GREP="$HTTP_PORT" + + elif [ $ARG -eq 7 ] ; then + tests[8]="traceroute_icmp_dns_as_root --icmp - trace " + [ $USER = root ] || continue + [ -n "$PROXY_WLAN" ] || proxy_get_if || continue + [ $HAVE_TRACEROUTE = 1 ] || continue + traceroute -i $PROXY_WLAN --icmp $DNS_TARGET -m $THOPS || { \ + retval=$? + ERROR $retval traceroute --icmp -m $THOPS + [ -z "$ALL" ] && exit 7$retval + } + INFO $prog test=$ARG "${tests[$ARG]}" + GREP="-i icmp" + + elif [ $ARG -eq 8 ] ; then + tests[8]="traceroute_tcp_dns_as_root -i $PROXY_WLAN -p 53 -T4 - trace " + [ $USER = root ] || continue + [ -n "$PROXY_WLAN" ] || proxy_get_if || continue + [ $HAVE_TRACEROUTE = 1 ] || continue + traceroute -i $PROXY_WLAN -p 53 -T4 $DNS_TARGET -m $THOPS || { \ + retval=$? + WARN $prog test=$ARG "${tests[$ARG]}" retval=$retval traceroute -T4 -p 53 -m $THOPS + [ -z "$ALL" ] && exit $ARG$retval || continue + } + INFO $prog test=$ARG "${tests[$ARG]}" + GREP="53" + + elif [ $ARG -eq 9 ] ; then + tests[9]="traceroute_icmp_dns_as_user -p 53 - trace " + [ $USER = root ] || continue + [ -n "$PROXY_WLAN" ] || proxy_get_if || continue + [ $HAVE_TRACEROUTE = 1 ] || continue + traceroute -i $PROXY_WLAN --icmp $DNS_TARGET -p 53 -m $THOPS || { \ + retval=$? + WARN $prog test=$ARG "${tests[$ARG]}" retval=$retval traceroute -i $PROXY_WLAN --icmp -m $THOPS + [ -z "$ALL" ] && exit $ARG$retval || continue + } + INFO $prog test=$ARG "${tests[$ARG]}" + GREP="53" + + elif [ $ARG -eq 10 ] ; then + tests[10]="wget_http_as_user $HTTP_PORT - http " + proxy=`proxy_ping_get_http` + env http_proxy=http://${proxy} \ + $WGET -S http://$HTTP_TARGET 2>/dev/null + retval=$? + # 8 is an oddball + if [ $retval -eq 8 -o $retval -eq 0 ] ; then + INFO $prog test=$ARG "${tests[$ARG]}" wget $HTTP_PORT + else + WARN $prog test=$ARG "${tests[$ARG]}" retval=$retval wget $HTTP_PORT + [ -z "$ALL" ] && exit $ARG$retval || continue + fi + GREP="$HTTP_PORT" + + elif [ $ARG -eq 11 ] ; then + tests[11]="curl_https_as_user - https " + proxy=`proxy_ping_get_https` + proxy_ping_curl --proxy http://${proxy} \ + --proxy-insecure https://$HTTP_TARGET || { \ + retval=$? + ERROR $prog test=$ARG "${tests[$ARG]}" retval=$retval curl $HTTP_PORT + [ -z "$ALL" ] && exit $ARG$retval || continue + } + INFO $prog test=$ARG "${tests[$ARG]}" + GREP="$HTTP_PORT" + + elif [ $ARG -eq 12 ] ; then + tests[12]="nmap_dns_as_root --privileged --send-eth -Pn -sU -p U:53 $DNS_HOST1 - nmap direct " + [ $USER = root ] || continue + which nmap 2>/dev/null >/dev/null || continue + [ -z "$DNS_HOST1" ] && DNS_HOST1="208.67.220.220" + nmap --privileged --send-eth -Pn -sU -p U:53 "$DNS_HOST1" || { \ + retval=$? 
+ ERROR $prog test=$ARG "${tests[$ARG]}" retval=$retval nmap 53 + [ -z "$ALL" ] && exit $ARG$retval || continue + } + INFO $prog test=$ARG "${tests[$ARG]}" + # works + GREP="53" + + elif [ $ARG -eq 13 ] ; then + tests[13]="curl_firewall_bin - wifi " + [ $USER = root ] || continue + proxy_test_curl_firewall_bin || continue + INFO $prog test=$ARG "${tests[$ARG]}" curl bin + # works + GREP="443" + + elif [ $ARG -eq 14 ] ; then + tests[14]="traceroute_icmp_gw_as_root --icmp $PROXY_WLAN_GW - gw wifi " + [ $USER = root ] || continue + [ $HAVE_TRACEROUTE = 1 ] || continue + proxy_test_traceroute_icmp_gw || continue + # works + INFO $prog test=$ARG "${tests[$ARG]}" + GREP="-i icmp" + + elif [ $ARG -eq 15 ] ; then + tests[15]="test_dig_direct - direct " + [ $HAVE_DIG = 1 ] || continue + proxy_test_dig_direct || continue + INFO $prog test=$ARG "${tests[$ARG]}" proxy_test_dig_direct + + elif [ $ARG -eq 16 ] ; then + tests[16]="nslookup_as_root nslookup $PRIV_BIN_OWNER - torhost " + [ $USER = root ] || continue + [ $HAVE_NSLOOKUP = 1 ] || continue + su -c "$NSL $DNS_TARGET $DNS_HOST1" -s /bin/sh $PRIV_BIN_OWNER >/dev/null || { \ + retval=$? + ERROR $prog test=$ARG "${tests[$ARG]}" retval=$retval "$NSL $DNS_TARGET $DNS_HOST1" -s /bin/sh $PRIV_BIN_OWNER + [ -z "$ALL" ] && exit $ARG$retval || continue + } + INFO $prog test=$ARG "${tests[$ARG]}" + # works /fails but maybe a noop + GREP="53" + + elif [ $ARG -eq 17 ] ; then + tests[17]="ntpdate_as_root ntpdate without service - ntp " + proxy_run_as_root || exit 9 + [ -x /usr/sbin/ntpdate ] || continue + # Curious: even though sgid 2755 ntp it fails as su ntp + # 12 Nov 23:28:35 ntpdate[17341]: bind() fails: Permission denied + /usr/sbin/ntpdate "$NTP_HOST1" || { \ + retval=$? + ERROR $prog test=$ARG "${tests[$ARG]}" retval=$retval ntpdate + [ -z "$ALL" ] && exit $ARG$retval || continue + } + INFO $prog test=$ARG "${tests[$ARG]}" + GREP="123" + elif [ $ARG -eq 18 ] ; then + tests[18]="ntpdate_as_root ntpdate with servie - ntp " + proxy_run_as_root || exit 9 + proxy_rc_service ntpd status >/dev/null && \ + proxy_rc_service ntpd stop >/dev/null && sleep 2 + /usr/sbin/ntpdate $NTP_HOST1 || { \ + retval=$? + ERROR $prog test=$ARG "${tests[$ARG]}" retval=$retval ntpdate + [ -z "$ALL" ] && exit $ARG$retval || continue + } + INFO $prog test=$ARG "${tests[$ARG]}" + # works + proxy_rc_service ntpd status >/dev/null || proxy_rc_service ntpd start + GREP="123" + elif [ $ARG -eq 19 ] ; then + tests[19]="curl_noproxy_http_as_user curl raw noproxy - firefail " + proxy_ping_curl --noproxy "'*.*'" --connect-timeout $TIMEOUT \ + http://$HTTP_TARGET >/dev/null && { + retval=$? + ERROR PANIC: $prog test=$ARG "${tests[$ARG]}" curl raw --noproxy + [ -z "$ALL" ] && exit $ARG$retval || continue + } + INFO $prog test=$ARG "${tests[$ARG]}" + GREP=80 + + elif [ $ARG -eq 20 ] ; then + tests[20]="curl_socksproxy_as_user curl $SOCKS_PORT - socks " + # needs dns + [ $DEBIAN -eq 0 ] && continue + + socks_proxy=socks5h://${SOCKS_HOST}:$SOCKS_PORT + proxy_ping_curl -x $socks_proxy https://$HTTP_TARGET >/dev/null \ + || { retval=$? 
; ERROR $prog test=$ARG "${tests[$ARG]}" retval=$retval curl $SOCKS_PORT + [ -z "$ALL" ] && exit $ARG$retval || continue + } + INFO $prog test=$ARG "${tests[$ARG]}" + # works with user/pass + GREP="$SOCKS_PORT" + + elif [ $ARG -eq 21 ] ; then + tests[21]="curl_httpsproxy_as_user - https " + [ -z "$https_proxy" ] && https_proxy=http://${HTTPS_PROXY_HOST}:${HTTPS_PORT} + proxy_ping_curl -x $https_proxy https://$HTTP_TARGET >/dev/null || { \ + if [ "$MODE" = gateway ] ; then + WARN $prog test=$ARG "${tests[$ARG]}" retval=$retval curl ${HTTPS_HOST} ${HTTPS_PORT} + continue + else + ERROR $prog test=$ARG "${tests[$ARG]}" retval=$retval curl ${HTTPS_HOST} HTTPS_PORT=${HTTPS_PORT} + [ -z "$ALL" ] && exit $ARG$retval || continue + fi + } + INFO $prog test=$ARG "${tests[$ARG]}" curl ${HTTPS_HOST} ${HTTPS_PORT} + GREP="${HTTPS_PORT}" + + elif [ $ARG -eq 22 ] ; then + tests[22]="iwlist_scan_as_user iwlist $PROXY_WLAN scan - iwlist " + [ $USER = root ] || continue + which iwlist 2>/dev/null || continue + [ -n "$PROXY_WLAN" ] || proxy_get_if || continue + iwlist $PROXY_WLAN scan >/dev/null || { + ERROR $prog retval=$? test=$ARG $PROXY_WLAN scan + [ -z "$ALL" ] && exit $ARG$1 || continue + } + INFO $prog test=$ARG "${tests[$ARG]}" + # works + + elif [ $ARG -eq 23 ] ; then + tests[23]="curl_proxy_as_user - direct " + proxy_ping_curl --insecure https://$HTTP_TARGET >/dev/null || { \ + retval=$? + ERROR $prog test=$ARG "${tests[$ARG]}" retval=$retval curl direct + [ -z "$ALL" ] && exit $ARG$retval || continue + } + INFO $prog test=$ARG "${tests[$ARG]}" + + elif [ $ARG -eq 24 ] ; then + tests[24]="dig_direct_or_dnsmasq dig -b $IP www.whatismypublicip.com - direct " + [ $HAVE_DIG = 1 ] || continue + [ -n "$PROXY_WLAN" -a -n "$IP" ] || proxy_ping_get_wlan_gw || continue + [ -n "$IP" ] || continue + dig -b $IP www.whatismypublicip.com +timeout=$TIMEOUT >/dev/null || { \ + retval=$? + WARN $prog test=$ARG "${tests[$ARG]}" retval=$retval dig -b $IP + [ -z "$ALL" ] && exit $ARG$retval || continue + } + INFO $prog test=$ARG "${tests[$ARG]}" dig -b $IP + + elif [ $ARG -eq 25 ] ; then + tests[25]="nslookup_as_user - direct " + [ $HAVE_NSLOOKUP = 1 ] || continue + # noenv with or without proxy + # @$DNS_HOST1 should fail for firewall unless dnsmasq is working + $NSL >/dev/null www.whatismypublicip.com || { \ + retval=$? + WARN $prog test=$ARG "${tests[$ARG]}" retval=$retval nslookup www.whatismypublicip.com + [ -z "$ALL" ] && exit $ARG$retval || continue + } + INFO $prog test=$ARG "${tests[$ARG]}" nslookup + + elif [ $ARG -eq 26 ] ; then + tests[26]="route_connected_ping_scan - direct " + [ $HAVE_DIG = 1 ] || continue + #? proxy_test_pretests + proxy_do_ping && \ + INFO $prog test=$ARG "${tests[$ARG]}" retval=$retval dig -b $IP || \ + WARN $prog test=$ARG "${tests[$ARG]}" retval=$retval dig -b $IP + + elif [ $ARG -eq 27 ] ; then + tests[27]="dns_as_user dig -b 127.0.0.1 - direct " + [ $HAVE_DIG = 1 ] || continue + [ -n "$PROXY_WLAN" -a -n "$IP" ] || proxy_ping_get_wlan_gw || continue + dig -b 127.0.0.1 www.whatismypublicip.com +timeout=$TIMEOUT >/dev/null || { \ + retval=$? + WARN $prog test=$ARG "${tests[$ARG]}" retval=$retval dig -b $IP + [ -z "$ALL" ] && exit $ARG$retval || continue + } + INFO $prog test=$ARG "${tests[$ARG]}" + + elif [ $ARG -eq 28 ] ; then + tests[28]="wget_as_user - direct " + proxy_ping_test_env || { WARN $prog test=$ARG "${tests[$ARG]}" no proxy in env ; } + $WGET -S https://$HTTP_TARGET 2>/dev/null + retval=$? 
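# wget exit status 8 means "server issued an error response" (e.g. a 404);
# the connection itself still worked, so it is accepted below along with 0.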
+ if [ $retval -eq 8 -o $retval -eq 0 ] ; then + INFO $prog test=$ARG "${tests[$ARG]}" wget + else + ERROR $prog test=$ARG "${tests[$ARG]}" retval=$retval wget + [ -z "$ALL" ] && exit $ARG$retval || continue + fi + + elif [ $ARG -eq 29 ] ; then + tests[29]="curl_as_user - direct " + proxy_ping_test_env || { WARN $prog test=$ARG "${tests[$ARG]}" no proxy in env ; } + proxy_ping_curl https://$HTTP_TARGET >/dev/null || { \ + retval=$? + ERROR $prog test=$ARG "${tests[$ARG]}" retval=$retval curl + [ -z "$ALL" ] && exit $ARG$retval || continue + } + INFO $prog test=$ARG "${tests[$ARG]}" + + elif [ $ARG -eq 30 ] ; then + tests[30]="tor_bootstrap_check_as_root tor_bootstrap_check.py - torhost " + [ $MODE = tor -o $MODE = selektor ] || { + ERROR $prog MODE != tor test=$ARG + [ -z "$ALL" ] && exit $ARG$retval || continue + } + port=$SOCKS_PORT + $NETS | grep -q :$port || { + ERROR $prog retval=$? test=$ARG tor not running on $port + [ -z "$ALL" ] && exit $ARG || continue + } + [ $USER = root ] || continue + + # was /usr/local/bin/tor_bootstrap_check.bash + [ -f /usr/local/src/helper-scripts/tor_bootstrap_check.py ] || return 1 + python3.sh /usr/local/src/helper-scripts/tor_bootstrap_check.py + # morons 100% + retval=$? + [ $retval -eq 0 -o $retval -eq 100 ] || { \ + retval=$? + WARN $prog test=$ARG "${tests[$ARG]}" retval=$retval tor_bootstrap_check + } + INFO $prog test=$ARG "${tests[$ARG]}" + + elif [ $ARG -eq 31 ] ; then + tests[31]="curl_noproxy_as_root polipo http pages $HTTP_PORT - direct http " + proxy_ping_curl --noproxy http://${HTTP_HOST}:$HTTP_PORT && { \ + retval=$? + ERROR PANIC: $prog test=$ARG "${tests[$ARG]}" retval=$retval polipo http pages $HTTP_PORT + [ -z "$ALL" ] && exit $ARG$retval || continue + } + INFO $prog test=$ARG "${tests[$ARG]}" + # works + GREP="$HTTP_PORT" + + elif [ $ARG -eq 32 ] ; then + tests[32]="ping_nmap_direct_as_root nmap 53 - direct " + [ $USER = root ] || continue + which nmap 2>/dev/null >/dev/null || continue + [ -n "$PROXY_WLAN" -a -n "$PROXY_WLAN_GW" ] || proxy_ping_get_wlan_gw || continue + proxy_ping_nmap_direct $DNS_HOST1 "$PROXY_WLAN_GW" U:67 || { + retval=$? + ERROR $prog test=$ARG "${tests[$ARG]}" retval=$retval nmapd 53 + [ -z "$ALL" ] && exit $ARG$retval || continue + } + INFO $prog test=$ARG "${tests[$ARG]}" + # works + GREP="53" + + elif [ $ARG -eq 33 ] ; then + tests[33]="host_virbr_as_user proxy_ping_test_virbr 1 - libvirthost " + proxy_ping_test_virbr 1 || { + retval=$? + ERROR $CONN virbr1 not running + [ -z "$ALL" ] && exit 1 || continue + } + # * Immediate connect fail for 10.0.2.15: Connection refused + INFO $prog test=$ARG "${tests[$ARG]}" + + elif [ $ARG -eq 34 ] ; then + tests[34]="python_ping_as_root traceroute --icmp $PROXY_WLAN_GW - wifi " + [ $USER = root ] || continue + [ -n "$PROXY_WLAN_GW" -a -n "$IP" ] || PROXY_WLAN_GW=`proxy_ping_get_wlan_gw` || continue + [ -f /usr/local/bin/ping2.py ] || continue + /usr/local/bin/ping2.py $IP $DNS_HOST1 $PROXY_WLAN_GW || { \ + retval=$? + ERROR $prog test=$ARG "${tests[$ARG]}" retval=$retval ping2.py $DNS_HOST1 + [ -z "$ALL" ] && exit $ARG$retval || continue + } + # works + INFO $prog test=$ARG "${tests[$ARG]}" + GREP="-i icmp" + + elif [ $ARG -eq 35 ] ; then + tests[35]="dig_as_root - firewall dig @$DNS_HOST1 - torhost dns " + [ $USER = root ] || continue + [ $HAVE_DIG = 1 ] || continue + # @$DNS_HOST1 + su -c "dig pool.ntp.org +timeout=$TIMEOUT" -s /bin/sh $PRIV_BIN_OWNER >/dev/null || { \ + retval=$? 
+ ERROR $prog test=$ARG "${tests[$ARG]}" retval=$retval dig pool.ntp.org $PRIV_BIN_OWNER + [ -z "$ALL" ] && exit $ARG$retval || continue + } + INFO $prog test=$ARG "${tests[$ARG]}" + # works + GREP="53" + + elif [ $ARG -eq 36 ] ; then + tests[36]="tor_resolve_as_user tor-resolve pool.ntp.org - tordns " + [ $HAVE_TOR_RESOLVE = 1 ] || continue + tor-resolve pool.ntp.org >/dev/null || { \ + retval=$? + # dunno Failed parsing SOCKS5 response conf? + WARN $prog test=$ARG "${tests[$ARG]}" retval=$retval tor-resolve pool.ntp.org + continue + } + INFO $prog test=$ARG "${tests[$ARG]}" + # works + GREP="9053" + + elif [ $ARG -eq 37 ] ; then + tests[37]="qemu-guest-agent and ports - libvirtguest " + ser=qemu-guest-agent + proxy_rc_service $ser status >/dev/null || proxy_rc_service $ser start + proxy_rc_service $ser status >/dev/null || { \ + retval=$? + ERROR $prog test=$ARG "${tests[$ARG]}" retval=$retval $ser status + [ -z "$ALL" ] && exit $ARG$retval || continue + } + [ -d /dev/virtio-ports ] || { \ + retval=$? + ERROR $prog test=$ARG "${tests[$ARG]}" retval=$retval /dev/virtio-ports + [ -z "$ALL" ] && exit $ARG$retval || continue + } + INFO $prog test=$ARG "${tests[$ARG]}" + GREP="" + elif [ $ARG -eq 38 ] ; then + tests[38]="qemu-guest-agent and ports - libvirthost whonix " + [ $USER = root ] || continue + $PL proxy_libvirt_list + aret=$? + if [ $aret -eq 10 ] ;then + WARN proxy_libvirt_status hung + elif [ $aret -ne 10 -a $aret -ne 0 ] ; then + DBUG proxy_libvirt_status aret=$aret + else + $PL proxy_libvirt_list | grep -q "$GATEW_DOM" || { + ERROR MODE=$MODE and $GATEW_DOM not running ; + [ -z "$ALL" ] && exit $ARG$retval || continue + } + INFO $prog test=$ARG "${tests[$ARG]}" + fi + elif false ; then + if ! grep -q '10.152.152.10\|127.0.0.1' /etc/resolv.conf ; then + $NETS | grep -q :53 || { + ERROR $prog retval=$? test=$ARG local resolv.conf but :53 not running + [ -z "$ALL" ] && exit 1 || continue + } + fi + + fi + [ -n "$GREP" ] && [ $DMESG_LINES -gt 0 ] && \ + DBUG `dmesg|tail|grep $GREP|tail -$DMESG_LINES` + + done +exit 0 + + 1) + env https_proxy=http://${SOCKS_HOST}:${HTTPS_PORT} wget $D -O - --no-check-certificate + 2) + curl $D -k --proxy + 3) + curl $D -k --proxy socks5://${SOCKS_HOST}:$SOCKS_PORT --proxy-insecure + 5) + nslookup -port=$SOCKS_DNS www.whatismypublicip.com ${SOCKS_HOST} \ + 6) + curl -k --proxy $HTTP_PORT + 16) + nslookup $PRIV_BIN_OWNER + 18) + ntpdate as sroot + 19) + curl raw noproxy + 0) + usage + diff --git a/overlay/Linux/usr/local/bin/proxy_testssl_lib.bash b/overlay/Linux/usr/local/bin/proxy_testssl_lib.bash new file mode 100755 index 0000000..5828a87 --- /dev/null +++ b/overlay/Linux/usr/local/bin/proxy_testssl_lib.bash @@ -0,0 +1,20100 @@ +#!/usr/bin/env bash +# -*- mode: sh; tab-width: 8; encoding: utf-8-unix -*- +# +# vim:ts=5:sw=5:expandtab +# we have a spaces softtab, that ensures readability with other editors too +ROLE=proxy + +# testssl.sh is a program for spotting weak SSL/TLS encryption, ciphers, protocols and some +# vulnerabilities or features. It may or may be not distributed by your distribution. +# The upstream versions are available (please leave the links intact): +# +# Development version https://github.com/drwetter/testssl.sh +# Stable version https://testssl.sh +# File bugs at github https://github.com/drwetter/testssl.sh/issues +# +# Project lead and initiator: Dirk Wetter, copyleft: 2007-today. +# Main contributions from David Cooper. Further contributors see CREDITS.md . 
+# +# License: GPLv2, see https://www.fsf.org/licensing/licenses/info/GPLv2.html +# and accompanying license "LICENSE.txt". Redistribution + modification under this +# license permitted. +# If you enclose this program or parts of it in your software, it has to be +# accompanied by the same license (see link). Do not violate the license. +# If you do not agree to these terms, do not use it in the first place! +# +# OpenSSL, which is being used and maybe distributed via one of this projects' +# web sites, is subject to their licensing: https://www.openssl.org/source/license.txt +# +# The client simulation data comes from SSLlabs and is licensed to the 'Qualys SSL Labs +# Terms of Use' (v2.2), see https://www.ssllabs.com/downloads/Qualys_SSL_Labs_Terms_of_Use.pdf, +# stating a CC BY 3.0 US license: https://creativecommons.org/licenses/by/3.0/us/ +# +# Please note: USAGE WITHOUT ANY WARRANTY, THE SOFTWARE IS PROVIDED "AS IS". +# USE IT AT your OWN RISK! +# Seriously! The threat is you run this code on your computer and untrusted input e.g. +# could be supplied from a server you are querying. +# +# HISTORY: +# Back in 2006 it all started with a few openssl commands... +# That's because openssl is a such a good swiss army knife (see e.g. +# https://wiki.openssl.org/index.php/Command_Line_Utilities) that it was difficult to resist +# wrapping some shell commands around it, which I used for my pen tests. This is how +# everything started. +# Now it has grown up, it has bash socket support for most features, which has been basically +# replacing more and more functions of OpenSSL and some sockets functions serve as some kind +# of central functions. +# +# WHY BASH? +# Cross-platform is one of the three main goals of this script. Second: Ease of installation. +# No compiling, install gems, go to CPAN, use pip etc. Third: Easy to use and to interpret +# the results. +# /bin/bash including the builtin sockets fulfill all that. The socket checks in bash may sound +# cool and unique -- they are -- but probably you can achieve e.g. the same result with my favorite +# interactive shell: zsh (zmodload zsh/net/socket -- checkout zsh/net/tcp) too! Oh, and btw. +# ksh93 has socket support too. +# /bin/bash though is way more often used within Linux and it's perfect for cross platform support. +# MacOS X has it and also under Windows the MSYS2 extension or Cygwin as well as Bash on Windows (WSL) +# has /bin/bash. +# +# Q: So what's the difference to www.ssllabs.com/ssltest/ or sslcheck.globalsign.com/ ? +# A: As of now ssllabs only check 1) webservers 2) on standard ports, 3) reachable from the +# internet. And those examples above 4) are 3rd parties. If these restrictions are all fine +# with you and you need a management compatible rating -- go ahead and use those. +# +# But also if your fine with those restrictions: testssl.sh is meant as a tool in your hand +# and it's way more flexible. Oh, and did I mention testssl.sh is open source? 
+# +#################### Stop talking, action now #################### + + +########### Definition of error codes +# +declare -r ERR_BASH=255 # Bash version incorrect +declare -r ERR_CMDLINE=254 # Cmd line couldn't be parsed +declare -r ERR_FCREATE=253 # Output file couldn't be created +declare -r ERR_FNAMEPARSE=252 # Input file couldn't be parsed +declare -r ERR_NOSUPPORT=251 # Feature requested is not supported +declare -r ERR_OSSLBIN=250 # Problem with OpenSSL binary +declare -r ERR_DNSBIN=249 # Problem with DNS lookup binaries +declare -r ERR_OTHERCLIENT=248 # Other client problem +declare -r ERR_DNSLOOKUP=247 # Problem with resolving IP addresses or names +declare -r ERR_CONNECT=246 # Connectivity problem +declare -r ERR_CLUELESS=245 # Weird state, either though user options or testssl.sh +declare -r ERR_RESOURCE=244 # Resources testssl.sh needs couldn't be read +declare -r ERR_CHILD=242 # Child received a signal from master +declare -r ALLOK=0 # All is fine + + +[ -z "${BASH_VERSINFO[0]}" ] && printf "\n\033[1;35m Please make sure you're using \"bash\"! Bye...\033[m\n\n" >&2 && exit $ERR_BASH +[ $(kill -l | grep -c SIG) -eq 0 ] && printf "\n\033[1;35m Please make sure you're calling me without leading \"sh\"! Bye...\033[m\n\n" >&2 && exit $ERR_BASH +[ ${BASH_VERSINFO[0]} -lt 3 ] && printf "\n\033[1;35m Minimum requirement is bash 3.2. You have $BASH_VERSION \033[m\n\n" >&2 && exit $ERR_BASH +[ ${BASH_VERSINFO[0]} -le 3 ] && [ ${BASH_VERSINFO[1]} -le 1 ] && printf "\n\033[1;35m Minimum requirement is bash 3.2. You have $BASH_VERSION \033[m\n\n" >&2 && exit $ERR_BASH + +########### Debugging helpers + profiling +# +declare -r PS4='|${LINENO}> \011${FUNCNAME[0]:+${FUNCNAME[0]}(): }' +DEBUGTIME=${DEBUGTIME:-false} # https://stackoverflow.com/questions/5014823/how-to-profile-a-bash-shell-script-slow-startup#20855353, profiling bash +DEBUG_ALLINONE=${DEBUG_ALLINONE:-false} # true: do debugging in one screen (old behavior for testssl.sh and bash3's default + # false: needed for performance analysis or useful for just having an extra file +DEBUG_ALLINONE=${SETX:-false} # SETX as a shortcut for old style debugging, overriding DEBUG_ALLINONE +if [[ "$SHELLOPTS" =~ xtrace ]]; then + if "$DEBUGTIME"; then + # separate debugging, doesn't mess up the screen, $DEBUGTIME determines whether we also do performance analysis + exec 42>&2 2> >(tee /tmp/testssl-$$.log | sed -u 's/^.*$/now/' | date -f - +%s.%N >/tmp/testssl-$$.time) + # BASH_XTRACEFD=42 + else + if ! "$DEBUG_ALLINONE"; then + exec 42>| /tmp/testssl-$$.log + BASH_XTRACEFD=42 + fi + fi +fi + +########### Traps! 
Make sure that temporary files are cleaned up after use in ANY case +# +trap "cleanup" QUIT EXIT +trap "child_error" USR1 + + +########### Internal definitions +# +declare -r VERSION="3.0.2" +declare -r SWCONTACT="dirk aet testssl dot sh" +grep -E -q "dev|rc|beta" <<< "$VERSION" && \ + SWURL="https://testssl.sh/dev/" || + SWURL="https://testssl.sh/" +if git log &>/dev/null; then + declare -r GIT_REL="$(git log --format='%h %ci' -1 2>/dev/null | awk '{ print $1" "$2" "$3 }')" + declare -r GIT_REL_SHORT="$(git log --format='%h %ci' -1 2>/dev/null | awk '{ print $1 }')" + declare -r REL_DATE="$(git log --format='%h %ci' -1 2>/dev/null | awk '{ print $2 }')" +fi +declare -r PROG_NAME="$(basename "$0")" +declare -r RUN_DIR="$(dirname "$0")" +declare -r SYSTEM="$(uname -s)" +declare -r SYSTEMREV="$(uname -r)" +SYSTEM2="" # currently only being used for WSL = bash on windows +TESTSSL_INSTALL_DIR="/" # If you run testssl.sh and it doesn't find it necessary file automagically set TESTSSL_INSTALL_DIR +CA_BUNDLES_PATH="${CA_BUNDLES_PATH:-""}" # You can have your stores some place else +ADDITIONAL_CA_FILES="${ADDITIONAL_CA_FILES:-""}" # single file with a CA in PEM format or comma separated lists of them +CIPHERS_BY_STRENGTH_FILE="" +TLS_DATA_FILE="" # mandatory file for socket-based handshakes +OPENSSL_LOCATION="" +HNAME="$(hostname)" +HNAME="${HNAME%%.*}" + +declare CMDLINE +CMDLINE_PARSED="" # This makes sure we don't let early fatal() write into files when files aren't created yet +declare -r -a CMDLINE_ARRAY=("$@") # When performing mass testing, the child processes need to be sent the +declare -a MASS_TESTING_CMDLINE # command line in the form of an array (see #702 and https://mywiki.wooledge.org/BashFAQ/050). + + +########### Some predefinitions: date, sed (we always use test and NOT try to determine +# capabilities by querying the OS) +# +HAS_GNUDATE=false +HAS_FREEBSDDATE=false +HAS_OPENBSDDATE=false +if date -d @735275209 >/dev/null 2>&1; then + if date -r @735275209 >/dev/null 2>&1; then + # It can't do any conversion from a plain date output. + HAS_OPENBSDDATE=true + else + HAS_GNUDATE=true + fi +fi +# FreeBSD and OS X date(1) accept "-f inputformat", so do newer OpenBSD versions >~ 6.6. +date -j -f '%s' 1234567 >/dev/null 2>&1 && \ + HAS_FREEBSDDATE=true + +echo A | sed -E 's/A//' >/dev/null 2>&1 && \ + declare -r HAS_SED_E=true || \ + declare -r HAS_SED_E=false + +########### Terminal defintions +tty -s && \ + declare -r INTERACTIVE=true || \ + declare -r INTERACTIVE=false + +if [[ -z $TERM_WIDTH ]]; then # no batch file and no otherwise predefined TERM_WIDTH + if ! tput cols &>/dev/null || ! 
"$INTERACTIVE";then # Prevent tput errors if running non interactive + export TERM_WIDTH=${COLUMNS:-80} + else + export TERM_WIDTH=${COLUMNS:-$(tput cols)} # for custom line wrapping and dashes + fi +fi +TERM_CURRPOS=0 # custom line wrapping needs alter the current horizontal cursor pos + + +########### Defining (and presetting) variables which can be changed +# +# Following variables make use of $ENV and can be used like "OPENSSL= ./testssl.sh " +declare -x OPENSSL +OPENSSL_TIMEOUT=${OPENSSL_TIMEOUT:-""} # Default connect timeout with openssl before we call the server side unreachable +CONNECT_TIMEOUT=${CONNECT_TIMEOUT:-""} # Default connect timeout with sockets before we call the server side unreachable +PHONE_OUT=${PHONE_OUT:-false} # Whether testssl can retrieve CRLs and OCSP +FAST_SOCKET=${FAST_SOCKET:-false} # EXPERIMENTAL feature to accelerate sockets -- DO NOT USE it for production +COLOR=${COLOR:-2} # 3: Extra color (ciphers, curves), 2: Full color, 1: B/W only 0: No ESC at all +COLORBLIND=${COLORBLIND:-false} # if true, swap blue and green in the output +SHOW_EACH_C=${SHOW_EACH_C:-false} # where individual ciphers are tested show just the positively ones tested +SHOW_SIGALGO=${SHOW_SIGALGO:-false} # "secret" switch whether testssl.sh shows the signature algorithm for -E / -e +SNEAKY=${SNEAKY:-false} # is the referer and useragent we leave behind just usual? +QUIET=${QUIET:-false} # don't output the banner. By doing this you acknowledge usage term appearing in the banner +SSL_NATIVE=${SSL_NATIVE:-false} # we do per default bash sockets where possible "true": switch back to "openssl native" +ASSUME_HTTP=${ASSUME_HTTP:-false} # in seldom cases (WAF, old servers, grumpy SSL) service detection fails. "True" enforces HTTP checks +BASICAUTH=${BASICAUTH:-""} # HTTP basic auth credentials can be set here like user:pass +BUGS=${BUGS:-""} # -bugs option from openssl, needed for some BIG IP F5 +WARNINGS=${WARNINGS:-""} # can be either off or batch +DEBUG=${DEBUG:-0} # 1: normal output the files in /tmp/ are kept for further debugging purposes + # 2: list more what's going on , also lists some errors of connections + # 3: slight hexdumps + other info, + # 4: display bytes sent via sockets + # 5: display bytes received via sockets + # 6: whole 9 yards +FAST=${FAST:-false} # preference: show only first cipher, run_allciphers with openssl instead of sockets +WIDE=${WIDE:-false} # whether to display for some options just ciphers or a table w hexcode/KX,Enc,strength etc. +MASS_TESTING_MODE=${MASS_TESTING_MODE:-serial} # can be serial or parallel. Subject to change +LOGFILE="${LOGFILE:-""}" # logfile if used +JSONFILE="${JSONFILE:-""}" # jsonfile if used +CSVFILE="${CSVFILE:-""}" # csvfile if used +HTMLFILE="${HTMLFILE:-""}" # HTML if used +FNAME=${FNAME:-""} # file name to read commands from +FNAME_PREFIX=${FNAME_PREFIX:-""} # output filename prefix, see --outprefix +APPEND=${APPEND:-false} # append to csv/json file instead of overwriting it +[[ -z "$NODNS" ]] && declare NODNS # If unset it does all DNS lookups per default. "min" only for hosts or "none" at all +HAS_IPv6=${HAS_IPv6:-false} # if you have OpenSSL with IPv6 support AND IPv6 networking set it to yes +ALL_CLIENTS=${ALL_CLIENTS:-false} # do you want to run all client simulation form all clients supplied by SSLlabs? +OFFENSIVE=${OFFENSIVE:-true} # do you want to include offensive vulnerability tests which may cause blocking by an IDS? + +########### Tuning vars which cannot be set by a cmd line switch. 
Use instead e.g "HEADER_MAXSLEEP=10 ./testssl.sh " +# +EXPERIMENTAL=${EXPERIMENTAL:-false} # a development hook which allows us to disable code +PROXY_WAIT=${PROXY_WAIT:-20} # waiting at max 20 seconds for socket reply through proxy +DNS_VIA_PROXY=${DNS_VIA_PROXY:-true} # do DNS lookups via proxy. --ip=proxy reverses this +IGN_OCSP_PROXY=${IGN_OCSP_PROXY:-false} # Also when --proxy is supplied it is ignored when testing for revocation via OCSP via --phone-out +HEADER_MAXSLEEP=${HEADER_MAXSLEEP:-5} # we wait this long before killing the process to retrieve a service banner / http header +MAX_SOCKET_FAIL=${MAX_SOCKET_FAIL:-2} # If this many failures for TCP socket connects are reached we terminate +MAX_OSSL_FAIL=${MAX_OSSL_FAIL:-2} # If this many failures for s_client connects are reached we terminate +MAX_HEADER_FAIL=${MAX_HEADER_FAIL:-2} # If this many failures for HTTP GET are encountered we don't try again to get the header +MAX_WAITSOCK=${MAX_WAITSOCK:-10} # waiting at max 10 seconds for socket reply. There shouldn't be any reason to change this. +CCS_MAX_WAITSOCK=${CCS_MAX_WAITSOCK:-5} # for the two CCS payload (each). There shouldn't be any reason to change this. +HEARTBLEED_MAX_WAITSOCK=${HEARTBLEED_MAX_WAITSOCK:-8} # for the heartbleed payload. There shouldn't be any reason to change this. +STARTTLS_SLEEP=${STARTTLS_SLEEP:-10} # max time wait on a socket for STARTTLS. MySQL has a fixed value of 1 which can't be overwritten (#914) +FAST_STARTTLS=${FAST_STARTTLS:-true} # at the cost of reliability decrease the handshakes for STARTTLS +USLEEP_SND=${USLEEP_SND:-0.1} # sleep time for general socket send +USLEEP_REC=${USLEEP_REC:-0.2} # sleep time for general socket receive +HSTS_MIN=${HSTS_MIN:-179} # >179 days is ok for HSTS + HSTS_MIN=$((HSTS_MIN * 86400)) # correct to seconds +HPKP_MIN=${HPKP_MIN:-30} # >=30 days should be ok for HPKP_MIN, practical hints? + HPKP_MIN=$((HPKP_MIN * 86400)) # correct to seconds +DAYS2WARN1=${DAYS2WARN1:-60} # days to warn before cert expires, threshold 1 +DAYS2WARN2=${DAYS2WARN2:-30} # days to warn before cert expires, threshold 2 +VULN_THRESHLD=${VULN_THRESHLD:-1} # if vulnerabilities to check >$VULN_THRESHLD we DON'T show a separate header line in the output each vuln. check +UNBRACKTD_IPV6=${UNBRACKTD_IPV6:-false} # some versions of OpenSSL (like Gentoo) don't support [bracketed] IPv6 addresses +NO_ENGINE=${NO_ENGINE:-false} # if there are problems finding the (external) openssl engine set this to true +declare -r CLIENT_MIN_PFS=5 # number of ciphers needed to run a test for PFS +CAPATH="${CAPATH:-/etc/ssl/certs/}" # Does nothing yet (FC has only a CA bundle per default, ==> openssl version -d) +GOOD_CA_BUNDLE="" # A bundle of CA certificates that can be used to validate the server's certificate +CERTIFICATE_LIST_ORDERING_PROBLEM=false # Set to true if server sends a certificate list that contains a certificate + # that does not certify the one immediately preceding it. (See RFC 8446, Section 4.4.2) +STAPLED_OCSP_RESPONSE="" +HAS_DNS_SANS=false # Whether the certificate includes a subjectAltName extension with a DNS name or an application-specific identifier type. 
+MEASURE_TIME_FILE=${MEASURE_TIME_FILE:-""} +if [[ -n "$MEASURE_TIME_FILE" ]] && [[ -z "$MEASURE_TIME" ]]; then + MEASURE_TIME=true +else + MEASURE_TIME=${MEASURE_TIME:-false} +fi +DISPLAY_CIPHERNAMES="openssl" # display OpenSSL ciphername (but both OpenSSL and RFC ciphernames in wide mode) +declare -r UA_STD="TLS tester from $SWURL" +declare -r UA_SNEAKY="Mozilla/5.0 (X11; Linux x86_64; rv:68.0) Gecko/20100101 Firefox/68.0" + +########### Initialization part, further global vars just being declared here +# +PRINTF="" # which external printf to use. Empty presets the internal one, see #1130 +IKNOW_FNAME=false +FIRST_FINDING=true # is this the first finding we are outputting to file? +JSONHEADER=true # include JSON headers and footers in HTML file, if one is being created +CSVHEADER=true # same for CSV +HTMLHEADER=true # same for HTML +SECTION_FOOTER_NEEDED=false # kludge for tracking whether we need to close the JSON section object +GIVE_HINTS=false # give an additional info to findings +SERVER_SIZE_LIMIT_BUG=false # Some servers have either a ClientHello total size limit or a 128 cipher limit (e.g. old ASAs) +MULTIPLE_CHECKS=false # need to know whether an MX record or a hostname resolves to multiple IPs to check +CHILD_MASS_TESTING=${CHILD_MASS_TESTING:-false} +TIMEOUT_CMD="" +HAD_SLEPT=0 +NR_SOCKET_FAIL=0 # Counter for socket failures +NR_OSSL_FAIL=0 # .. for OpenSSL connects +NR_HEADER_FAIL=0 # .. for HTTP_GET +PROTOS_OFFERED="" # This keeps which protocol is being offered. See has_server_protocol(). +TLS12_CIPHER_OFFERED="" # This contains the hexcode of a cipher known to be supported by the server with TLS 1.2 +CURVES_OFFERED="" # This keeps which curves have been detected. Just for error handling +KNOWN_OSSL_PROB=false # We need OpenSSL a few times. This variable is an indicator if we can't connect. Eases handling +DETECTED_TLS_VERSION="" # .. as hex string, e.g. 0300 or 0303 +TLS13_ONLY=false # Does the server support TLS 1.3 ONLY? +OSSL_SHORTCUT=${OSSL_SHORTCUT:-false} # Hack: if during the scan turns out the OpenSSL binary suports TLS 1.3 would be a better choice, this enables it. +TLS_EXTENSIONS="" +declare -r NPN_PROTOs="spdy/4a2,spdy/3,spdy/3.1,spdy/2,spdy/1,http/1.1" +# alpn_protos needs to be space-separated, not comma-seperated, including odd ones observed @ facebook and others, old ones like h2-17 omitted as they could not be found +declare -r ALPN_PROTOs="h2 spdy/3.1 http/1.1 grpc-exp h2-fb spdy/1 spdy/2 spdy/3 stun.turn stun.nat-discovery webrtc c-webrtc ftp" +declare -a SESS_RESUMPTION +TEMPDIR="" +TMPFILE="" +ERRFILE="" +CLIENT_AUTH=false +TLS_TICKETS=false +NO_SSL_SESSIONID=false +CERT_COMPRESSION=${CERT_COMPRESSION:-false} # secret flag to set in addition to --devel for certificate compression +HOSTCERT="" # File with host certificate, without intermediate certificate +HEADERFILE="" +HEADERVALUE="" +HTTP_STATUS_CODE="" +DH_GROUP_OFFERED="" +DH_GROUP_LEN_P=0 +KEY_SHARE_EXTN_NR="33" # The extension number for key_share was changed from 40 to 51 in TLSv1.3 draft 23. + # In order to support draft 23 and later in addition to earlier drafts, need to + # know which extension number to use. Note that it appears that a single + # ClientHello cannot advertise both draft 23 and later and earlier drafts. + # Preset may help to deal with STARTTLS + TLS 1.3 draft 23 and later but not earlier. 
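As the MEASURE_TIME_FILE handling at the top of this block implies, pointing that variable at a file is enough to switch the timing measurement on; a hedged sketch (file name illustrative):

    # setting the file implies MEASURE_TIME=true; meant for performance measurements of the checks
    MEASURE_TIME_FILE=timings.log ./testssl.sh example.com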
+BAD_SERVER_HELLO_CIPHER=false # reserved for cases where a ServerHello doesn't contain a cipher offered in the ClientHello +GOST_STATUS_PROBLEM=false +PATTERN2SHOW="" +SOCK_REPLY_FILE="" +NW_STR="" +LEN_STR="" +SNI="" +POODLE="" # keep vulnerability status for TLS_FALLBACK_SCSV +OSSL_NAME="" # openssl name, in case of LibreSSL it's LibreSSL +OSSL_VER="" # openssl version, will be auto-determined +OSSL_VER_MAJOR=0 +OSSL_VER_MINOR=0 +OSSL_VER_APPENDIX="none" +CLIENT_PROB_NO=1 +HAS_DH_BITS=${HAS_DH_BITS:-false} # initialize openssl variables +HAS_CURVES=false +OSSL_SUPPORTED_CURVES="" +HAS_SSL2=false +HAS_SSL3=false +HAS_TLS13=false +HAS_X448=false +HAS_X25519=false +HAS_PKUTIL=false +HAS_PKEY=false +HAS_NO_SSL2=false +HAS_NOSERVERNAME=false +HAS_CIPHERSUITES=false +HAS_COMP=false +HAS_NO_COMP=false +HAS_ALPN=false +HAS_NPN=false +HAS_FALLBACK_SCSV=false +HAS_PROXY=false +HAS_XMPP=false +HAS_POSTGRES=false +HAS_MYSQL=false +HAS_LMTP=false +HAS_NNTP=false +HAS_IRC=false +HAS_CHACHA20=false +HAS_AES128_GCM=false +HAS_AES256_GCM=false +HAS_ZLIB=false +HAS_DIG=false +HAS_HOST=false +HAS_DRILL=false +HAS_NSLOOKUP=false +HAS_IDN=false +HAS_IDN2=false +HAS_AVAHIRESOLVE=false +HAS_DIG_NOIDNOUT=false + +OSSL_CIPHERS_S="" +PORT=443 # unless otherwise auto-determined, see below +NODE="" +NODEIP="" +rDNS="" +CORRECT_SPACES="" # Used for IPv6 and proper output formatting +IPADDRs="" +IP46ADDRs="" +LOCAL_A=false # Does the $NODEIP come from /etc/hosts? +LOCAL_AAAA=false # Does the IPv6 IP come from /etc/hosts? +XMPP_HOST="" +PROXYIP="" # $PROXYIP:$PROXPORT is your proxy if --proxy is defined ... +PROXYPORT="" # ... and openssl has proxy support +PROXY="" # Once check_proxy() executed it contains $PROXYIP:$PROXPORT +VULN_COUNT=0 +SERVICE="" # Is the server running an HTTP server, SMTP, POP or IMAP? +URI="" +CERT_FINGERPRINT_SHA2="" +RSA_CERT_FINGERPRINT_SHA2="" +STARTTLS_PROTOCOL="" +OPTIMAL_PROTO="" # Need this for IIS6 (sigh) + OpenSSL 1.0.2, otherwise some handshakes will fail see + # https://github.com/PeterMosmans/openssl/issues/19#issuecomment-100897892 +STARTTLS_OPTIMAL_PROTO="" # Same for STARTTLS, see https://github.com/drwetter/testssl.sh/issues/188 +OPTIMAL_SOCKETS_PROTO="" # Same for tls_sockets(). -- not yet used +ALL_FAILED_SOCKETS=true # Set to true if all attempts to connect to server using tls_sockets/sslv2_sockets failed +TLS_TIME="" # To keep the value of TLS server timestamp +TLS_NOW="" # Similar +TLS_DIFFTIME_SET=false # Tells TLS functions to measure the TLS difftime or not +NOW_TIME="" +HTTP_TIME="" +GET_REQ11="" +START_TIME=0 # time in epoch when the action started +END_TIME=0 # .. 
ended +SCAN_TIME=0 # diff of both: total scan time +LAST_TIME=0 # only used for performance measurements (MEASURE_TIME=true) +SERVER_COUNTER=0 # Counter for multiple servers + +TLS_LOW_BYTE="" # For "secret" development stuff, see -q below +HEX_CIPHER="" # " + + +########### Global variables for parallel mass testing +# +declare -r PARALLEL_SLEEP=1 # Time to sleep after starting each test +MAX_WAIT_TEST=${MAX_WAIT_TEST:-1200} # Maximum time (in seconds) to wait for a test to complete +MAX_PARALLEL=${MAX_PARALLEL:-20} # Maximum number of tests to run in parallel + # This value may be made larger on systems with faster processors +declare -a -i PARALLEL_TESTING_PID=() # process id for each child test (or 0 to indicate test has already completed) +declare -a PARALLEL_TESTING_CMDLINE=() # command line for each child test +declare -i NR_PARALLEL_TESTS=0 # number of parallel tests run +declare -i NEXT_PARALLEL_TEST_TO_FINISH=0 # number of parallel tests that have completed and have been processed +declare FIRST_JSON_OUTPUT=true # true if no output has been added to $JSONFILE yet. + + +########### Cipher suite information +# +declare -i TLS_NR_CIPHERS=0 +declare TLS_CIPHER_HEXCODE=() +declare TLS_CIPHER_OSSL_NAME=() +declare TLS_CIPHER_RFC_NAME=() +declare TLS_CIPHER_SSLVERS=() +declare TLS_CIPHER_KX=() +declare TLS_CIPHER_AUTH=() +declare TLS_CIPHER_ENC=() +declare TLS_CIPHER_EXPORT=() +declare TLS_CIPHER_OSSL_SUPPORTED=() +declare TLS13_OSSL_CIPHERS="TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:TLS_AES_128_CCM_SHA256:TLS_AES_128_CCM_8_SHA256" + +########### Severity functions and globals +# +INFO=0 +OK=0 +LOW=1 +MEDIUM=2 +HIGH=3 +CRITICAL=4 +SEVERITY_LEVEL=0 + +set_severity_level() { + local severity=$1 + + if [[ "$severity" == LOW ]]; then + SEVERITY_LEVEL=$LOW + elif [[ "$severity" == MEDIUM ]]; then + SEVERITY_LEVEL=$MEDIUM + elif [[ "$severity" == HIGH ]]; then + SEVERITY_LEVEL=$HIGH + elif [[ "$severity" == CRITICAL ]]; then + SEVERITY_LEVEL=$CRITICAL + else + # WARN and FATAL will always be logged as the represent scanning problems + echo "Supported severity levels are LOW, MEDIUM, HIGH, CRITICAL!" + help 1 + fi +} + +show_finding() { + local severity=$1 + + ( [[ "$severity" == DEBUG ]] ) || + ( [[ "$severity" == INFO ]] && [[ $SEVERITY_LEVEL -le $INFO ]] ) || + ( [[ "$severity" == OK ]] && [[ $SEVERITY_LEVEL -le $OK ]] ) || + ( [[ "$severity" == LOW ]] && [[ $SEVERITY_LEVEL -le $LOW ]] ) || + ( [[ "$severity" == MEDIUM ]] && [[ $SEVERITY_LEVEL -le $MEDIUM ]] ) || + ( [[ "$severity" == HIGH ]] && [[ $SEVERITY_LEVEL -le $HIGH ]] ) || + ( [[ "$severity" == CRITICAL ]] && [[ $SEVERITY_LEVEL -le $CRITICAL ]] ) || + ( [[ "$severity" == WARN ]] ) || + ( [[ "$severity" == FATAL ]] ) +} + +########### Output functions + +# For HTML output, replace any HTML reserved characters with the entity name +html_reserved(){ + local output + "$do_html" || return 0 + #sed -e 's/\&/\&/g' -e 's//\>/g' -e 's/"/\"/g' -e "s/'/\'/g" <<< "$1" + output="${1//&/&}" + output="${output///>}" + output="${output//\"/"}" + output="${output//\'/'}" + printf -- "%s" "$output" + return 0 +} + +html_out() { + "$do_html" || return 0 + [[ -n "$HTMLFILE" ]] && [[ ! -d "$HTMLFILE" ]] && printf -- "%b" "$1" >> "$HTMLFILE" +} + +# This is intentionally the same. 
+safe_echo() { printf -- "%b" "$1"; } +tm_out() { printf -- "%b" "$1"; } +tmln_out() { printf -- "%b" "$1\n"; } + +out() { printf -- "%b" "$1"; html_out "$(html_reserved "$1")"; } +outln() { printf -- "%b" "$1\n"; html_out "$(html_reserved "$1")\n"; } + + +#TODO: Still no shell injection safe but if just run it from the cmd line: that's fine + +# Color print functions, see also https://www.tldp.org/HOWTO/Bash-Prompt-HOWTO/x329.html +tm_liteblue() { [[ "$COLOR" -ge 2 ]] && ( "$COLORBLIND" && tm_out "\033[0;32m$1" || tm_out "\033[0;34m$1" ) || tm_out "$1"; tm_off; } # not yet used +pr_liteblue() { tm_liteblue "$1"; [[ "$COLOR" -ge 2 ]] && ( "$COLORBLIND" && html_out "$(html_reserved "$1")" || html_out "$(html_reserved "$1")" ) || html_out "$(html_reserved "$1")"; } +tmln_liteblue() { tm_liteblue "$1"; tmln_out; } +prln_liteblue() { pr_liteblue "$1"; outln; } + +tm_blue() { [[ "$COLOR" -ge 2 ]] && ( "$COLORBLIND" && tm_out "\033[1;32m$1" || tm_out "\033[1;34m$1" ) || tm_out "$1"; tm_off; } # used for head lines of single tests +pr_blue() { tm_blue "$1"; [[ "$COLOR" -ge 2 ]] && ( "$COLORBLIND" && html_out "$(html_reserved "$1")" || html_out "$(html_reserved "$1")" ) || html_out "$(html_reserved "$1")"; } +tmln_blue() { tm_blue "$1"; tmln_out; } +prln_blue() { pr_blue "$1"; outln; } + +# we should be able to use aliases here +tm_warning() { [[ "$COLOR" -ge 2 ]] && tm_out "\033[0;35m$1" || tm_underline "$1"; tm_off; } # some local problem: one test cannot be done +tmln_warning() { tm_warning "$1"; tmln_out; } # litemagenta +pr_warning() { tm_warning "$1"; [[ "$COLOR" -ge 2 ]] && html_out "$(html_reserved "$1")" || ( [[ "$COLOR" -eq 1 ]] && html_out "$(html_reserved "$1")" || html_out "$(html_reserved "$1")" ); } +prln_warning() { pr_warning "$1"; outln; } + +tm_magenta() { [[ "$COLOR" -ge 2 ]] && tm_out "\033[1;35m$1" || tm_underline "$1"; tm_off; } # fatal error: quitting because of this! +tmln_magenta() { tm_magenta "$1"; tmln_out; } +# different as warning above? +pr_magenta() { tm_magenta "$1"; [[ "$COLOR" -ge 2 ]] && html_out "$(html_reserved "$1")" || ( [[ "$COLOR" -eq 1 ]] && html_out "$(html_reserved "$1")" || html_out "$(html_reserved "$1")" ); } +prln_magenta() { pr_magenta "$1"; outln; } + +tm_litecyan() { [[ "$COLOR" -ge 2 ]] && tm_out "\033[0;36m$1" || tm_out "$1"; tm_off; } # not yet used +tmln_litecyan() { tm_litecyan "$1"; tmln_out; } +pr_litecyan() { tm_litecyan "$1"; [[ "$COLOR" -ge 2 ]] && html_out "$(html_reserved "$1")" || html_out "$(html_reserved "$1")"; } +prln_litecyan() { pr_litecyan "$1"; outln; } + +tm_cyan() { [[ "$COLOR" -ge 2 ]] && tm_out "\033[1;36m$1" || tm_out "$1"; tm_off; } # additional hint +tmln_cyan() { tm_cyan "$1"; tmln_out; } +pr_cyan() { tm_cyan "$1"; [[ "$COLOR" -ge 2 ]] && html_out "$(html_reserved "$1")" || html_out "$(html_reserved "$1")"; } +prln_cyan() { pr_cyan "$1"; outln; } + +tm_litegrey() { [[ "$COLOR" -ne 0 ]] && tm_out "\033[0;37m$1" || tm_out "$1"; tm_off; } # ... https://github.com/drwetter/testssl.sh/pull/600#issuecomment-276129876 +tmln_litegrey() { tm_litegrey "$1"; tmln_out; } # not really usable on a black background, see .. 
+prln_litegrey() { pr_litegrey "$1"; outln; } +pr_litegrey() { tm_litegrey "$1"; [[ "$COLOR" -ne 0 ]] && html_out "$(html_reserved "$1")" || html_out "$(html_reserved "$1")"; } + +tm_grey() { [[ "$COLOR" -ne 0 ]] && tm_out "\033[1;30m$1" || tm_out "$1"; tm_off; } +pr_grey() { tm_grey "$1"; [[ "$COLOR" -ne 0 ]] && html_out "$(html_reserved "$1")" || html_out "$(html_reserved "$1")"; } +tmln_grey() { tm_grey "$1"; tmln_out; } +prln_grey() { pr_grey "$1"; outln; } + +tm_svrty_good() { [[ "$COLOR" -ge 2 ]] && ( "$COLORBLIND" && tm_out "\033[0;34m$1" || tm_out "\033[0;32m$1" ) || tm_out "$1"; tm_off; } # litegreen (liteblue), This is good +tmln_svrty_good() { tm_svrty_good "$1"; tmln_out; } +pr_svrty_good() { tm_svrty_good "$1"; [[ "$COLOR" -ge 2 ]] && ( "$COLORBLIND" && html_out "$(html_reserved "$1")" || html_out "$(html_reserved "$1")" ) || html_out "$(html_reserved "$1")"; } +prln_svrty_good() { pr_svrty_good "$1"; outln; } + +tm_svrty_best() { [[ "$COLOR" -ge 2 ]] && ( "$COLORBLIND" && tm_out "\033[1;34m$1" || tm_out "\033[1;32m$1" ) || tm_out "$1"; tm_off; } # green (blue), This is the best +tmln_svrty_best() { tm_svrty_best "$1"; tmln_out; } +pr_svrty_best() { tm_svrty_best "$1"; [[ "$COLOR" -ge 2 ]] && ( "$COLORBLIND" && html_out "$(html_reserved "$1")" || html_out "$(html_reserved "$1")" ) || html_out "$(html_reserved "$1")"; } +prln_svrty_best() { pr_svrty_best "$1"; outln; } + +tm_svrty_low() { [[ "$COLOR" -ge 2 ]] && tm_out "\033[1;33m$1" || tm_out "$1"; tm_off; } # yellow brown | academic or minor problem +tmln_svrty_low() { tm_svrty_low "$1"; tmln_out; } +pr_svrty_low() { tm_svrty_low "$1"; [[ "$COLOR" -ge 2 ]] && html_out "$(html_reserved "$1")" || html_out "$(html_reserved "$1")"; } +prln_svrty_low() { pr_svrty_low "$1"; outln; } + +tm_svrty_medium() { [[ "$COLOR" -ge 2 ]] && tm_out "\033[0;33m$1" || tm_out "$1"; tm_off; } # brown | it is not a bad problem but you shouldn't do this +pr_svrty_medium() { tm_svrty_medium "$1"; [[ "$COLOR" -ge 2 ]] && html_out "$(html_reserved "$1")" || html_out "$(html_reserved "$1")"; } +tmln_svrty_medium(){ tm_svrty_medium "$1"; tmln_out; } +prln_svrty_medium(){ pr_svrty_medium "$1"; outln; } + +tm_svrty_high() { [[ "$COLOR" -ge 2 ]] && tm_out "\033[0;31m$1" || tm_bold "$1"; tm_off; } # litered +pr_svrty_high() { tm_svrty_high "$1"; [[ "$COLOR" -ge 2 ]] && html_out "$(html_reserved "$1")" || ( [[ "$COLOR" -eq 1 ]] && html_out "$(html_reserved "$1")" || html_out "$(html_reserved "$1")" ); } +tmln_svrty_high() { tm_svrty_high "$1"; tmln_out; } +prln_svrty_high() { pr_svrty_high "$1"; outln; } + +tm_svrty_critical() { [[ "$COLOR" -ge 2 ]] && tm_out "\033[1;31m$1" || tm_bold "$1"; tm_off; } # red +pr_svrty_critical() { tm_svrty_critical "$1"; [[ "$COLOR" -ge 2 ]] && html_out "$(html_reserved "$1")" || ( [[ "$COLOR" -eq 1 ]] && html_out "$(html_reserved "$1")" || html_out "$(html_reserved "$1")" ); } +tmln_svrty_critical() { tm_svrty_critical "$1"; tmln_out; } +prln_svrty_critical() { pr_svrty_critical "$1"; outln; } + +tm_deemphasize() { tm_out "$1"; } # hook for a weakened screen output, see #600 +pr_deemphasize() { tm_deemphasize "$1"; html_out "$(html_reserved "$1")"; } +tmln_deemphasize() { tm_deemphasize "$1"; tmln_out; } +prln_deemphasize() { pr_deemphasize "$1"; outln; } + +# color=1 functions +tm_off() { [[ "$COLOR" -ne 0 ]] && tm_out "\033[m"; } + +tm_bold() { [[ "$COLOR" -ne 0 ]] && tm_out "\033[1m$1" || tm_out "$1"; tm_off; } +tmln_bold() { tm_bold "$1"; tmln_out; } +pr_bold() { tm_bold "$1"; [[ "$COLOR" -ne 0 ]] && html_out 
"$(html_reserved "$1")" || html_out "$(html_reserved "$1")"; } +prln_bold() { pr_bold "$1" ; outln; } + +NO_ITALICS=false +if [[ $SYSTEM == OpenBSD ]]; then + NO_ITALICS=true +elif [[ $SYSTEM == FreeBSD ]]; then + if [[ ${SYSTEMREV%\.*} -le 9 ]]; then + NO_ITALICS=true + fi +fi +tm_italic() { ( [[ "$COLOR" -ne 0 ]] && ! "$NO_ITALICS" ) && tm_out "\033[3m$1" || tm_out "$1"; tm_off; } +tmln_italic() { tm_italic "$1" ; tmln_out; } +pr_italic() { tm_italic "$1"; [[ "$COLOR" -ne 0 ]] && html_out "$(html_reserved "$1")" || html_out "$(html_reserved "$1")"; } +prln_italic() { pr_italic "$1"; outln; } + +tm_strikethru() { [[ "$COLOR" -ne 0 ]] && tm_out "\033[9m$1" || tm_out "$1"; tm_off; } # ugly! +tmln_strikethru() { tm_strikethru "$1"; tmln_out; } +pr_strikethru() { tm_strikethru "$1"; [[ "$COLOR" -ne 0 ]] && html_out "$(html_reserved "$1")" || html_out "$(html_reserved "$1")"; } +prln_strikethru() { pr_strikethru "$1" ; outln; } + +tm_underline() { [[ "$COLOR" -ne 0 ]] && tm_out "\033[4m$1" || tm_out "$1"; tm_off; } +tmln_underline() { tm_underline "$1"; tmln_out; } +pr_underline() { tm_underline "$1"; [[ "$COLOR" -ne 0 ]] && html_out "$(html_reserved "$1")" || html_out "$(html_reserved "$1")"; } +prln_underline() { pr_underline "$1"; outln; } + +tm_reverse() { [[ "$COLOR" -ne 0 ]] && tm_out "\033[7m$1" || tm_out "$1"; tm_off; } +tm_reverse_bold() { [[ "$COLOR" -ne 0 ]] && tm_out "\033[7m\033[1m$1" || tm_out "$1"; tm_off; } +pr_reverse() { tm_reverse "$1"; [[ "$COLOR" -ne 0 ]] && html_out "$(html_reserved "$1")" || html_out "$(html_reserved "$1")"; } +pr_reverse_bold() { tm_reverse_bold "$1"; [[ "$COLOR" -ne 0 ]] && html_out "$(html_reserved "$1")" || html_out "$(html_reserved "$1")"; } + +#pr_headline() { pr_blue "$1"; } +# https://misc.flogisoft.com/bash/tip_colors_and_formatting + +#pr_headline() { [[ "$COLOR" -ge 2 ]] && out "\033[1;30m\033[47m$1" || out "$1"; tm_off; } +tm_headline() { [[ "$COLOR" -ne 0 ]] && tm_out "\033[1m\033[4m$1" || tm_out "$1"; tm_off; } +tmln_headline() { tm_headline "$1"; tmln_out; } +pr_headline() { tm_headline "$1"; [[ "$COLOR" -ne 0 ]] && html_out "$(html_reserved "$1")" || html_out "$(html_reserved "$1")"; } +pr_headlineln() { pr_headline "$1" ; outln; } + +tm_squoted() { tm_out "'$1'"; } +pr_squoted() { out "'$1'"; } +tm_dquoted() { tm_out "\"$1\""; } +pr_dquoted() { out "\"$1\""; } + +# either files couldn't be found or openssl isn't good enough (which shouldn't happen anymore) +tm_local_problem() { tm_warning "Local problem: $1"; } +tmln_local_problem() { tmln_warning "Local problem: $1"; } +pr_local_problem() { pr_warning "Local problem: $1"; } +prln_local_problem() { prln_warning "Local problem: $1"; } + +# general failure +tm_fixme() { tm_warning "Fixme: $1"; } +tmln_fixme() { tmln_warning "Fixme: $1"; } +pr_fixme() { pr_warning "Fixme: $1"; } +prln_fixme() { prln_warning "Fixme: $1"; } + +pr_url() { tm_out "$1"; html_out "$1"; } +pr_boldurl() { tm_bold "$1"; html_out "$1"; } + +### color switcher (see e.g. https://linuxtidbits.wordpress.com/2008/08/11/output-color-on-bash-scripts/ +### https://www.tldp.org/HOWTO/Bash-Prompt-HOWTO/x405.html +### no output support for HTML! +set_color_functions() { + local ncurses_tput=true + + if [[ $SYSTEM == OpenBSD ]] && [[ "$TERM" =~ xterm-256 ]]; then + export TERM=xterm + # OpenBSD can't handle 256 colors (yet) in xterm which might lead to ugly errors + # like "tput: not enough arguments (3) for capability `AF'". Not our fault but + # before we get blamed we fix it here. 
+ fi + + # Empty all vars if we have COLOR=0 equals no escape code -- these are globals: + red="" + green="" + brown="" + blue="" + magenta="" + cyan="" + grey="" + yellow="" + off="" + bold="" + underline="" + italic="" + + type -p tput &>/dev/null || return 0 # Hey wait, do we actually have tput / ncurses ? + tput cols &>/dev/null || return 0 # tput under BSDs and GNUs doesn't work either (TERM undefined?) + tput sgr0 &>/dev/null || ncurses_tput=false + if [[ "$COLOR" -ge 2 ]]; then + if $ncurses_tput; then + red=$(tput setaf 1) + green=$(tput setaf 2) + brown=$(tput setaf 3) + blue=$(tput setaf 4) + magenta=$(tput setaf 5) + cyan=$(tput setaf 6) + grey=$(tput setaf 7) + yellow=$(tput setaf 3; tput bold) + else # this is a try for old BSD, see terminfo(5) + red=$(tput AF 1) + green=$(tput AF 2) + brown=$(tput AF 3) + blue=$(tput AF 4) + magenta=$(tput AF 5) + cyan=$(tput AF 6) + grey=$(tput AF 7) + yellow=$(tput AF 3; tput md) + fi + fi + if [[ "$COLOR" -ge 1 ]]; then + if $ncurses_tput; then + bold=$(tput bold) + underline=$(tput sgr 0 1 2>/dev/null) + italic=$(tput sitm) # This doesn't work on FreeBSDi (9,10) and OpenBSD ... + italic_end=$(tput ritm) # ... and this, too + off=$(tput sgr0) + else # this is a try for old BSD, see terminfo(5) + bold=$(tput md) + underline=$(tput us) + italic=$(tput ZH 2>/dev/null) # This doesn't work on FreeBSDi (9,10) and OpenBSD + italic_end=$(tput ZR 2>/dev/null) # ... probably entry missing in /etc/termcap + reverse=$(tput mr) + off=$(tput me) + fi + fi + # FreeBSD 10 understands ESC codes like 'echo -e "\e[3mfoobar\e[23m"', but also no tput for italics +} + +strip_quote() { + # remove color codes (see https://www.commandlinefu.com/commands/view/3584/remove-color-codes-special-characters-with-sed) + # \', leading and all trailing spaces + sed -e "s,$(echo -e "\033")\[[0-9;]*[a-zA-Z],,g" \ + -e "s/\"/\\'/g" \ + -e 's/^ *//g' \ + -e 's/ *$//g' <<< "$1" +} + +# " deconfuse vim\'s syntax highlighting ;-) + +#################### JSON FILE FORMATTING #################### + +fileout_json_footer() { + if "$do_json"; then + if [[ "$SCAN_TIME" -eq 0 ]]; then + fileout_json_finding "scanTime" "WARN" "Scan interrupted" "" "" "" + elif [[ $SEVERITY_LEVEL -lt $LOW ]] ; then + # no scan time in --severity=low and above, also needed for Travis, hackish... 
+ fileout_json_finding "scanTime" "INFO" $SCAN_TIME "" "" "" + fi + printf "]\n" >> "$JSONFILE" + fi + if "$do_pretty_json"; then + if [[ "$SCAN_TIME" -eq 0 ]]; then + echo -e " ],\n \"scanTime\" : \"Scan interrupted\"\n}" >> "$JSONFILE" + else + echo -e " ],\n \"scanTime\" : ${SCAN_TIME}\n}" >> "$JSONFILE" + fi + fi +} + +fileout_json_section() { + case $1 in + 0) echo -e " \"pretest\" : [" ;; + 1) echo -e " \"singleCipher\" : [" ;; + 2) echo -e ",\n \"protocols\" : [" ;; + 3) echo -e ",\n \"grease\" : [" ;; + 4) echo -e ",\n \"ciphers\" : [" ;; + 5) echo -e ",\n \"pfs\" : [" ;; + 6) echo -e ",\n \"serverPreferences\" : [" ;; + 7) echo -e ",\n \"serverDefaults\" : [" ;; + 8) echo -e ",\n \"headerResponse\" : [" ;; + 9) echo -e ",\n \"vulnerabilities\" : [" ;; + 10) echo -e ",\n \"cipherTests\" : [" ;; + 11) echo -e ",\n \"browserSimulations\": [" ;; + *) echo "invalid section" ;; + esac +} + +fileout_section_header() { + local str="" + "$2" && str="$(fileout_section_footer false)" + "$do_pretty_json" && FIRST_FINDING=true && (printf "%s%s\n" "$str" "$(fileout_json_section "$1")") >> "$JSONFILE" + SECTION_FOOTER_NEEDED=true +} + +# arg1: whether to end object too +fileout_section_footer() { + "$do_pretty_json" && printf "\n ]" >> "$JSONFILE" + "$do_pretty_json" && "$1" && echo -e "\n }" >> "$JSONFILE" + SECTION_FOOTER_NEEDED=false +} + +fileout_json_print_parameter() { + local parameter="$1" + local filler="$2" + local value="$3" + local not_last="$4" + local spaces="" + + "$do_json" && \ + spaces=" " || \ + spaces=" " + if [[ -n "$value" ]] || [[ "$parameter" == finding ]]; then + printf "%s%s%s%s" "$spaces" "\"$parameter\"" "$filler" ": \"$value\"" >> "$JSONFILE" + "$not_last" && printf ",\n" >> "$JSONFILE" + fi +} + +fileout_json_finding() { + local target + local finding="$3" + local cve="$4" + local cwe="$5" + local hint="$6" + + if "$do_json"; then + "$FIRST_FINDING" || echo -n "," >> "$JSONFILE" + echo -e " {" >> "$JSONFILE" + fileout_json_print_parameter "id" " " "$1" true + fileout_json_print_parameter "ip" " " "$NODE/$NODEIP" true + fileout_json_print_parameter "port" " " "$PORT" true + fileout_json_print_parameter "severity" " " "$2" true + fileout_json_print_parameter "cve" " " "$cve" true + fileout_json_print_parameter "cwe" " " "$cwe" true + "$GIVE_HINTS" && fileout_json_print_parameter "hint" " " "$hint" true + fileout_json_print_parameter "finding" " " "$finding" false + echo -e "\n }" >> "$JSONFILE" + fi + if "$do_pretty_json"; then + if [[ "$1" == service ]]; then + if [[ $SERVER_COUNTER -gt 1 ]]; then + echo " ," >> "$JSONFILE" + fi + target="$NODE" + $do_mx_all_ips && target="$URI" + echo -e " { + \"targetHost\" : \"$target\", + \"ip\" : \"$NODEIP\", + \"port\" : \"$PORT\", + \"rDNS\" : \"$rDNS\", + \"service\" : \"$finding\"," >> "$JSONFILE" + $do_mx_all_ips && echo -e " \"hostname\" : \"$NODE\"," >> "$JSONFILE" + else + ("$FIRST_FINDING" && echo -n " {" >> "$JSONFILE") || echo -n ",{" >> "$JSONFILE" + echo -e -n "\n" >> "$JSONFILE" + fileout_json_print_parameter "id" " " "$1" true + fileout_json_print_parameter "severity" " " "$2" true + fileout_json_print_parameter "cve" " " "$cve" true + fileout_json_print_parameter "cwe" " " "$cwe" true + "$GIVE_HINTS" && fileout_json_print_parameter "hint" " " "$hint" true + fileout_json_print_parameter "finding" " " "$finding" false + echo -e -n "\n }" >> "$JSONFILE" + fi + fi +} + +##################### FILE FORMATTING ######################### + +fileout_pretty_json_banner() { + local target + + if ! 
"$do_mass_testing"; then + [[ -z "$NODE" ]] && parse_hn_port "${URI}" + # NODE, URL_PATH, PORT, IPADDR and IP46ADDR is set now --> wrong place + target="$NODE" + $do_mx_all_ips && target="$URI" + fi + + echo -e " \"Invocation\" : \"$PROG_NAME $CMDLINE\", + \"at\" : \"$HNAME:$OPENSSL_LOCATION\", + \"version\" : \"$VERSION ${GIT_REL_SHORT:-$CVS_REL_SHORT} from $REL_DATE\", + \"openssl\" : \"$OSSL_NAME $OSSL_VER from $OSSL_BUILD_DATE\", + \"startTime\" : \"$START_TIME\", + \"scanResult\" : [" +} + +fileout_banner() { + if "$JSONHEADER"; then + # "$do_json" && # here we maybe should add a banner, too + "$do_pretty_json" && (printf "%s\n" "$(fileout_pretty_json_banner)") >> "$JSONFILE" + fi +} + +fileout_separator() { + if "$JSONHEADER"; then + "$do_pretty_json" && echo " ," >> "$JSONFILE" + "$do_json" && echo -n "," >> "$JSONFILE" + fi +} + +fileout_footer() { + if "$JSONHEADER"; then + fileout_json_footer + fi + # CSV: no footer + return 0 +} + +fileout_insert_warning() { + # See #815. Make sure we don't mess up the JSON PRETTY format if we complain with a client side warning. + # This should only be called if an *extra* warning will be printed (previously: 'fileout "WARN" ' + # arg1: json identifier, arg2: normally "WARN", arg3: finding + # + # Also, we have to be careful with any form of mass testing so that a warning won't lead to an invalid JSON + # file. As any child will do any check as well (to be reconsidered later), we don't need also the parent to issue + # warnings upfront, see #1169. As a detection we'll use --file/-iL as in the children jobs it'll be removed: + [[ "$CMDLINE=" =~ --file ]] && return 0 + [[ "$CMDLINE=" =~ -iL ]] && return 0 + # Note we still have the message on screen + in HTML which is not as optimal as it could be + + if "$do_pretty_json"; then + echo -e " \"clientProblem${CLIENT_PROB_NO}\" : [" >>"$JSONFILE" + CLIENT_PROB_NO=$((CLIENT_PROB_NO + 1)) + FIRST_FINDING=true # make sure we don't have a comma here + fi + fileout "$1" "$2" "$3" + if "$do_pretty_json"; then + echo -e "\n ]," >>"$JSONFILE" + fi +} + +fileout_csv_finding() { + safe_echo "\"$1\"," >> "$CSVFILE" + safe_echo "\"$2\"," >> "$CSVFILE" + safe_echo "\"$3\"," >> "$CSVFILE" + safe_echo "\"$4\"," >> "$CSVFILE" + safe_echo "\"$5\"," >> "$CSVFILE" + safe_echo "\"$6\"," >> "$CSVFILE" + if "$GIVE_HINTS"; then + safe_echo "\"$7\"," >> "$CSVFILE" + safe_echo "\"$8\"\n" >> "$CSVFILE" + else + safe_echo "\"$7\"\n" >> "$CSVFILE" + fi +} + + +# ID, SEVERITY, FINDING, CVE, CWE, HINT +fileout() { + local severity="$2" + local cve="$4" + local cwe="$5" + local hint="$6" + + if ( "$do_pretty_json" && [[ "$1" == service ]] ) || show_finding "$severity"; then + local finding=$(strip_lf "$(newline_to_spaces "$(strip_quote "$3")")") # additional quotes will mess up screen output + [[ -e "$JSONFILE" ]] && [[ ! -d "$JSONFILE" ]] && fileout_json_finding "$1" "$severity" "$finding" "$cve" "$cwe" "$hint" + "$do_csv" && [[ -n "$CSVFILE" ]] && [[ ! -d "$CSVFILE" ]] && \ + fileout_csv_finding "$1" "$NODE/$NODEIP" "$PORT" "$severity" "$finding" "$cve" "$cwe" "$hint" + "$FIRST_FINDING" && FIRST_FINDING=false + fi +} + + +json_header() { + local fname_prefix + local filename_provided=false + + [[ -n "$JSONFILE" ]] && [[ ! -d "$JSONFILE" ]] && filename_provided=true + # Similar to HTML: Don't create headers and footers in the following scenarios: + # * no JSON/CSV output is being created. + # * mass testing is being performed and each test will have its own file. 
+ # * this is an individual test within a mass test and all output is being placed in a single file. + ! "$do_json" && ! "$do_pretty_json" && JSONHEADER=false && return 0 + "$do_mass_testing" && ! "$filename_provided" && JSONHEADER=false && return 0 + "$CHILD_MASS_TESTING" && "$filename_provided" && JSONHEADER=false && return 0 + + if "$do_display_only"; then + fname_prefix="local-ciphers" + elif "$do_mass_testing"; then + : + elif "$do_mx_all_ips"; then + fname_prefix="${FNAME_PREFIX}mx-${URI}" + else + # ensure NODE, URL_PATH, PORT, IPADDR and IP46ADDR are set + ! "$filename_provided" && [[ -z "$NODE" ]] && parse_hn_port "${URI}" + fname_prefix="${FNAME_PREFIX}${NODE}_p${PORT}" + fi + if [[ -z "$JSONFILE" ]]; then + JSONFILE="$fname_prefix-$(date +"%Y%m%d-%H%M".json)" + elif [[ -d "$JSONFILE" ]]; then + JSONFILE="$JSONFILE/${fname_prefix}-$(date +"%Y%m%d-%H%M".json)" + fi + # Silently reset APPEND var if the file doesn't exist as otherwise it won't be created + if "$APPEND" && [[ ! -s "$JSONFILE" ]]; then + APPEND=false + fi + if "$APPEND"; then + JSONHEADER=false + else + [[ -s "$JSONFILE" ]] && fatal "non-empty \"$JSONFILE\" exists. Either use \"--append\" or (re)move it" $ERR_FCREATE + "$do_json" && echo "[" > "$JSONFILE" + "$do_pretty_json" && echo "{" > "$JSONFILE" + fi + return 0 +} + + +csv_header() { + local fname_prefix + local filename_provided=false + + [[ -n "$CSVFILE" ]] && [[ ! -d "$CSVFILE" ]] && filename_provided=true + # CSV similar to JSON + ! "$do_csv" && CSVHEADER=false && return 0 + "$do_mass_testing" && ! "$filename_provided" && CSVHEADER=false && return 0 + "$CHILD_MASS_TESTING" && "$filename_provided" && CSVHEADER=false && return 0 + + if "$do_display_only"; then + fname_prefix="local-ciphers" + elif "$do_mass_testing"; then + : + elif "$do_mx_all_ips"; then + fname_prefix="${FNAME_PREFIX}mx-${URI}" + else + # ensure NODE, URL_PATH, PORT, IPADDR and IP46ADDR are set + ! "$filename_provided" && [[ -z "$NODE" ]] && parse_hn_port "${URI}" + fname_prefix="${FNAME_PREFIX}${NODE}_p${PORT}" + fi + if [[ -z "$CSVFILE" ]]; then + CSVFILE="${fname_prefix}-$(date +"%Y%m%d-%H%M".csv)" + elif [[ -d "$CSVFILE" ]]; then + CSVFILE="$CSVFILE/${fname_prefix}-$(date +"%Y%m%d-%H%M".csv)" + fi + # Silently reset APPEND var if the file doesn't exist as otherwise it won't be created + if "$APPEND" && [[ ! -s "$CSVFILE" ]]; then + APPEND=false + fi + if "$APPEND"; then + CSVHEADER=false + else + [[ -s "$CSVFILE" ]] && fatal "non-empty \"$CSVFILE\" exists. Either use \"--append\" or (re)move it" $ERR_FCREATE + touch "$CSVFILE" + if "$GIVE_HINTS"; then + fileout_csv_finding "id" "fqdn/ip" "port" "severity" "finding" "cve" "cwe" "hint" + else + fileout_csv_finding "id" "fqdn/ip" "port" "severity" "finding" "cve" "cwe" + fi + fi + return 0 +} + + +################# JSON FILE FORMATTING END. HTML START #################### + +html_header() { + local fname_prefix + local filename_provided=false + + [[ -n "$HTMLFILE" ]] && [[ ! -d "$HTMLFILE" ]] && filename_provided=true + # Don't create HTML headers and footers in the following scenarios: + # * HTML output is not being created. + # * mass testing is being performed and each test will have its own HTML file. + # * this is an individual test within a mass test and all HTML output is being placed in a single file. + ! "$do_html" && HTMLHEADER=false && return 0 + "$do_mass_testing" && ! 
"$filename_provided" && HTMLHEADER=false && return 0 + "$CHILD_MASS_TESTING" && "$filename_provided" && HTMLHEADER=false && return 0 + + if "$do_display_only"; then + fname_prefix="local-ciphers" + elif "$do_mass_testing"; then + : + elif "$do_mx_all_ips"; then + fname_prefix="${FNAME_PREFIX}mx-${URI}" + else + # ensure NODE, URL_PATH, PORT, IPADDR and IP46ADDR are set + ! "$filename_provided" && [[ -z "$NODE" ]] && parse_hn_port "${URI}" + fname_prefix="${FNAME_PREFIX}${NODE}_p${PORT}" + fi + if [[ -z "$HTMLFILE" ]]; then + HTMLFILE="$fname_prefix-$(date +"%Y%m%d-%H%M".html)" + elif [[ -d "$HTMLFILE" ]]; then + HTMLFILE="$HTMLFILE/$fname_prefix-$(date +"%Y%m%d-%H%M".html)" + fi + # Silently reset APPEND var if the file doesn't exist as otherwise it won't be created + if "$APPEND" && [[ ! -s "$HTMLFILE" ]]; then + APPEND=false + fi + if "$APPEND"; then + HTMLHEADER=false + else + [[ -s "$HTMLFILE" ]] && fatal "non-empty \"$HTMLFILE\" exists. Either use \"--append\" or (re)move it" $ERR_FCREATE + html_out "\n" + html_out "\n" + html_out "\n" + html_out "\n" + html_out "\n" + html_out "\n" + html_out "testssl.sh\n" + html_out "\n" + html_out "\n" + html_out "
\n"
+     fi
+     return 0
+}
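A short sketch of the file handling implemented by the json/csv/html header functions above (host and file names illustrative):

    # the default output name is derived from node, port and a timestamp,
    # e.g. example.com_p443-20240106-0138.html
    ./testssl.sh --html example.com
    # with a fixed output file, a second run has to opt in to appending instead of overwriting
    ./testssl.sh --htmlfile results.html --append example.com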
+
+html_banner() {
+     if "$CHILD_MASS_TESTING" && "$HTMLHEADER"; then
+          html_out "## Scan started as: \"$PROG_NAME $CMDLINE\"\n"
+          html_out "## at $HNAME:$OPENSSL_LOCATION\n"
+          html_out "## version testssl: $VERSION ${GIT_REL_SHORT:-$CVS_REL_SHORT} from $REL_DATE\n"
+          html_out "## version openssl: \"$OSSL_NAME $OSSL_VER\" from \"$OSSL_BUILD_DATE\")\n\n"
+     fi
+}
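The banner above is only written by child processes during mass testing; a hedged sketch of the two output layouts that the header logic distinguishes (file names illustrative):

    # each test gets its own HTML file, so headers/footers are written per file
    ./testssl.sh --html --file hosts.txt
    # or: all output collected into a single file supplied up front
    ./testssl.sh --htmlfile all-scans.html --file hosts.txt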
+
+html_footer() {
+     if "$HTMLHEADER"; then
+          html_out "
\n" + html_out "\n" + html_out "\n" + fi + return 0 +} + +################# HTML FILE FORMATTING END #################### + +prepare_logging() { + # arg1: for testing mx records name we put a name of logfile in here, otherwise we get strange file names + local fname_prefix="$1" + local filename_provided=false + + [[ -n "$LOGFILE" ]] && [[ ! -d "$LOGFILE" ]] && filename_provided=true + + # Similar to html_header(): + ! "$do_logging" && return 0 + "$do_mass_testing" && ! "$filename_provided" && return 0 + "$CHILD_MASS_TESTING" && "$filename_provided" && return 0 + + [[ -z "$fname_prefix" ]] && fname_prefix="${FNAME_PREFIX}${NODE}_p${PORT}" + + if [[ -z "$LOGFILE" ]]; then + LOGFILE="$fname_prefix-$(date +"%Y%m%d-%H%M".log)" + elif [[ -d "$LOGFILE" ]]; then + # actually we were instructed to place all files in a DIR instead of the current working dir + LOGFILE="$LOGFILE/$fname_prefix-$(date +"%Y%m%d-%H%M".log)" + else + : # just for clarity: a log file was specified, no need to do anything else + fi + + if ! "$APPEND"; then + [[ -s "$LOGFILE" ]] && fatal "non-empty \"$LOGFILE\" exists. Either use \"--append\" or (re)move it" $ERR_FCREATE + fi + tmln_out "## Scan started as: \"$PROG_NAME $CMDLINE\"" >>"$LOGFILE" + tmln_out "## at $HNAME:$OPENSSL_LOCATION" >>"$LOGFILE" + tmln_out "## version testssl: $VERSION ${GIT_REL_SHORT:-$CVS_REL_SHORT} from $REL_DATE" >>"$LOGFILE" + tmln_out "## version openssl: \"$OSSL_VER\" from \"$OSSL_BUILD_DATE\")\n" >>"$LOGFILE" + exec > >(tee -a -i "$LOGFILE") +} + +################### FILE FORMATTING END ######################### + +###### START helper function definitions ###### + +if [[ "${BASH_VERSINFO[0]}" == 3 ]]; then + # older bash can do this only (MacOS X), even SLES 11, see #697 + toupper() { tr 'a-z' 'A-Z' <<< "$1"; } + tolower() { tr '[:upper:]' '[:lower:]' <<< "$1"; } +else + toupper() { echo -n "${1^^}"; } + tolower() { echo -n "${1,,}"; } +fi + +get_last_char() { + echo "${1:~0}" # "${string: -1}" would work too (both also in bash 3.2) +} + # Checking for last char. If already a separator supplied, we don't need an additional one +debugme() { + [[ "$DEBUG" -ge 2 ]] && "$@" + return 0 +} + +hex2dec() { + echo $((16#$1)) +} + +# convert 414243 into ABC +hex2ascii() { + for (( i=0; i<${#1}; i+=2 )); do + # 2>/dev/null added because 'warning: command substitution: ignored null byte in input' + # --> didn't help though + printf "\x${1:$i:2}" 2>/dev/null + done +} + +# convert decimal number < 256 to hex +dec02hex() { + printf "x%02x" "$1" +} + +# convert decimal number between 256 and < 256*256 to hex +dec04hex() { + local a=$(printf "%04x" "$1") + printf "x%02s, x%02s" "${a:0:2}" "${a:2:2}" +} + + +# trim spaces for BSD and old sed +count_lines() { + #echo "${$(wc -l <<< "$1")// /}" + # ^^ bad substitution under bash, zsh ok. For some reason this does the trick: + echo $(wc -l <<< "$1") +} + +count_words() { + #echo "${$(wc -w <<< "$1")// /}" + # ^^ bad substitution under bash, zsh ok. 
For some reason this does the trick: + echo $(wc -w <<< "$1") +} + +count_ciphers() { + echo $(wc -w <<< "${1//:/ }") +} + +#arg1: TLS 1.2 and below ciphers +#arg2: TLS 1.3 ciphers +#arg3: options (e.g., -V) +actually_supported_osslciphers() { + local tls13_ciphers="$TLS13_OSSL_CIPHERS" + + [[ "$2" != ALL ]] && tls13_ciphers="$2" + if "$HAS_CIPHERSUITES"; then + $OPENSSL ciphers $3 $OSSL_CIPHERS_S -ciphersuites "$tls13_ciphers" "$1" 2>/dev/null || echo "" + elif [[ -n "$tls13_ciphers" ]]; then + $OPENSSL ciphers $3 $OSSL_CIPHERS_S "$tls13_ciphers:$1" 2>/dev/null || echo "" + else + $OPENSSL ciphers $OSSL_CIPHERS_S $3 "$1" 2>/dev/null || echo "" + fi +} + +# Given a protocol (arg1) and a list of ciphers (arg2) that is formatted as +# ", xx,xx, xx,xx, xx,xx, xx,xx" remove any TLSv1.3 ciphers if the protocol +# is less than 04 and remove any TLSv1.2-only ciphers if the protocol is less +# than 03. +strip_inconsistent_ciphers() { + local -i proto=0x$1 + local cipherlist="$2" + + [[ $proto -lt 4 ]] && cipherlist="${cipherlist//, 13,0[0-9a-fA-F]/}" + if [[ $proto -lt 3 ]]; then + cipherlist="${cipherlist//, 00,3[b-fB-F]/}" + cipherlist="${cipherlist//, 00,40/}" + cipherlist="${cipherlist//, 00,6[7-9a-dA-D]/}" + cipherlist="${cipherlist//, 00,9[c-fC-F]/}" + cipherlist="${cipherlist//, 00,[abAB][0-9a-fA-F]/}" + cipherlist="${cipherlist//, 00,[cC][0-5]/}" + cipherlist="${cipherlist//, 16,[bB][7-9aA]/}" + cipherlist="${cipherlist//, [cC]0,2[3-9a-fA-F]/}" + cipherlist="${cipherlist//, [cC]0,3[01278a-fA-F]/}" + cipherlist="${cipherlist//, [cC]0,[4-9aA][0-9a-fA-F]/}" + cipherlist="${cipherlist//, [cC][cC],1[345]/}" + cipherlist="${cipherlist//, [cC][cC],[aA][89a-eA-E]/}" + fi + echo "$cipherlist" + return 0 +} + +newline_to_spaces() { + tr '\n' ' ' <<< "$1" | sed 's/ $//' +} + +colon_to_spaces() { + echo "${1//:/ }" +} + +strip_lf() { + tr -d '\n' <<< "$1" | tr -d '\r' +} + +strip_spaces() { + echo "${1// /}" +} + +# https://web.archive.org/web/20121022051228/http://codesnippets.joyent.com/posts/show/1816 +strip_leading_space() { + printf "%s" "${1#"${1%%[![:space:]]*}"}" +} +strip_trailing_space() { + printf "%s" "${1%"${1##*[![:space:]]}"}" +} + + +# retrieve cipher from ServerHello (via openssl) +get_cipher() { + local cipher="" + local server_hello="$(cat -v "$1")" + # This and two other following instances are not best practice and normally a useless use of "cat", see + # https://web.archive.org/web/20160711205930/http://porkmail.org/era/unix/award.html#uucaletter + # However there seem to be cases where the preferred $(< "$1") logic has a problem. + # Esepcially with bash 3.2 (Mac OS X) and when on the server side binary chars + # are returned, see https://stackoverflow.com/questions/7427262/how-to-read-a-file-into-a-variable-in-shell#22607352 + # and https://github.com/drwetter/testssl.sh/issues/1292 + # Performance measurements showed no to barely measureable penalty (1s displayed in 9 tries). + + if [[ "$server_hello" =~ Cipher\ *:\ ([A-Z0-9]+-[A-Za-z0-9\-]+|TLS_[A-Za-z0-9_]+|SSL_[A-Za-z0-9_]+) ]]; then + cipher="${BASH_REMATCH##* }" + elif [[ "$server_hello" =~ (New|Reused)", "(SSLv[23]|TLSv1(\.[0-3])?(\/SSLv3)?)", Cipher is "([A-Z0-9]+-[A-Za-z0-9\-]+|TLS_[A-Za-z0-9_]+) ]]; then + cipher="${BASH_REMATCH##* }" + fi + tm_out "$cipher" +} + +# retrieve protocol from ServerHello (via openssl) +get_protocol() { + local protocol="" + local server_hello="$(cat -v "$1")" + + if [[ "$server_hello" =~ Protocol\ *:\ (SSLv[23]|TLSv1(\.[0-3])?) 
]]; then + protocol="${BASH_REMATCH##* }" + elif [[ "$server_hello" =~ (New|Reused)", TLSv1.3, Cipher is "TLS_[A-Z0-9_]+ ]]; then + # Note: When OpenSSL prints "New, , Cipher is ", is the + # negotiated cipher, but is not the negotiated protocol. Instead, it is + # the SSL/TLS protocol that first defined . Since the ciphers that were + # first defined for TLSv1.3 may only be used with TLSv1.3, this line may be used + # to determine whether TLSv1.3 was negotiated, but if another protocol is specified + # on this line, then this line does not indicate the actual protocol negotiated. Also, + # only TLSv1.3 cipher suites have names that begin with TLS_, which provides additional + # assurance that the above match will only succeed if TLSv1.3 was negotiated. + protocol="TLSv1.3" + fi + tm_out "$protocol" +} + +is_number() { + [[ "$1" =~ ^[1-9][0-9]*$ ]] && \ + return 0 || \ + return 1 +} + +is_ipv4addr() { + local octet="(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])" + local ipv4address="$octet\\.$octet\\.$octet\\.$octet" + + [[ -z "$1" ]] && return 1 + # more than numbers, important for hosts like AAA.BBB.CCC.DDD.in-addr.arpa.DOMAIN.TLS + [[ -n $(tr -d '0-9\.' <<< "$1") ]] && return 1 + + grep -Eq "$ipv4address" <<< "$1" && \ + return 0 || \ + return 1 +} + +# a bit easier +is_ipv6addr() { + [[ -z "$1" ]] && return 1 + # less than 2x ":" + [[ $(count_lines "$(tr ':' '\n' <<< "$1")") -le 1 ]] && \ + return 1 + #check on chars allowed: + [[ -n "$(tr -d '0-9:a-fA-F ' <<< "$1" | sed -e '/^$/d')" ]] && \ + return 1 + return 0 +} + +# now some function for the integrated BIGIP F5 Cookie detector (see https://github.com/drwetter/F5-BIGIP-Decoder) + +f5_hex2ip() { + debugme echo "$1" + echo $((16#${1:0:2})).$((16#${1:2:2})).$((16#${1:4:2})).$((16#${1:6:2})) +} +f5_hex2ip6() { + debugme echo "$1" + echo "[${1:0:4}:${1:4:4}:${1:8:4}:${1:12:4}.${1:16:4}:${1:20:4}:${1:24:4}:${1:28:4}]" +} + +f5_determine_routeddomain() { + local tmp + tmp="${1%%o*}" + echo "${tmp/rd/}" +} + +f5_ip_oldstyle() { + local tmp + local a b c d + + tmp="${1/%.*}" # until first dot + tmp="$(printf "%08x" "$tmp")" # convert the whole thing to hex, now back to ip (reversed notation: + tmp="$(f5_hex2ip $tmp)" # transform to ip with reversed notation + IFS="." read -r a b c d <<< "$tmp" # reverse it + echo $d.$c.$b.$a +} + +f5_port_decode() { + local tmp + + tmp="$(strip_lf "$1")" # remove lf if there is one + tmp="${tmp/.0000/}" # to be sure remove trailing zeros with a dot + tmp="${tmp#*.}" # get the port + tmp="$(printf "%04x" "${tmp}")" # to hex + if [[ ${#tmp} -eq 4 ]]; then + : + elif [[ ${#tmp} -eq 3 ]]; then # fill it up with leading zeros if needed + tmp=0${tmp} + elif [[ ${#tmp} -eq 2 ]]; then + tmp=00${tmp} + fi + echo $((16#${tmp:2:2}${tmp:0:2})) # reverse order and convert it from hex to dec +} + + + +###### END helper function definitions ###### + +# prints out multiple lines in $1, left aligned by spaces in $2 +out_row_aligned() { + local first=true + + while read line; do + "$first" && \ + first=false || \ + out "$2" + outln "$line" + done <<< "$1" +} + +# prints text over multiple lines, trying to make no line longer than $max_width. +# Each line is indented with $spaces. 
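A hedged usage sketch for the line-wrapping helper that the comment above describes and whose definition follows (the list variable is illustrative); the second argument is the indentation prefix put in front of continuation lines, the third the target line width:

    # wrap a long space-separated list at roughly the terminal width,
    # indenting every continuation line by 16 spaces
    out_row_aligned_max_width "$long_list" "                " $TERM_WIDTH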
+out_row_aligned_max_width() { + local text="$1" + local spaces="$2" + local -i max_width="$3" + local -i i len + local cr=$'\n' + local line + local first=true + + max_width=$max_width-${#spaces} + len=${#text} + while true; do + if [[ $len -lt $max_width ]]; then + # If the remaining text to print is shorter than $max_width, + # then just print it. + i=$len + else + # Find the final space character in the text that is less than + # $max_width characters into the remaining text, and make the + # text up to that space character the next line to print. + line="${text:0:max_width}" + line="${line% *}" + i="${#line}" + if [[ $i -eq $max_width ]]; then + # If there are no space characters in the first $max_width + # characters of the remaining text, then make the text up + # to the first space the next line to print. If there are + # no space characters in the remaining text, make the + # remaining text the next line to print. + line="${text#* }" + i=$len-${#line} + [[ $i -eq 0 ]] && i=$len + fi + fi + if ! "$first"; then + tm_out "${cr}${spaces}" + fi + tm_out "${text:0:i}" + [[ $i -eq $len ]] && break + len=$len-$i-1 + i=$i+1 + text="${text:i:len}" + first=false + [[ $len -eq 0 ]] && break + done + return 0 +} + +out_row_aligned_max_width_by_entry() { + local text="$1" + local spaces="$2" + local -i max_width="$3" + local print_function="$4" + local resp entry prev_entry=" " + + resp="$(out_row_aligned_max_width "$text" "$spaces" "$max_width")" + while read -d " " entry; do + if [[ -n "$entry" ]]; then + $print_function "$entry" + elif [[ -n "$prev_entry" ]]; then + outln; out " " + fi + out " " + prev_entry="$entry" + done <<< "$resp" +} + +print_fixed_width() { + local text="$1" + local -i i len width="$2" + local print_function="$3" + + len=${#text} + $print_function "$text" + for (( i=len; i <= width; i++ )); do + out " " + done +} + +# saves $TMPFILE or file supplied in $2 under name "$TEMPDIR/$NODEIP.$1". +# Note: after finishing $TEMPDIR will be removed unless DEBUG >=1 +tmpfile_handle() { + local savefile="$2" + [[ -z "$savefile" ]] && savefile=$TMPFILE +#FIXME: make sure/find out if we do not need $TEMPDIR/$NODEIP.$1" if debug=0. We would save fs access here + mv $savefile "$TEMPDIR/$NODEIP.$1" 2>/dev/null + [[ $ERRFILE =~ dev.null ]] && return 0 || \ + mv $ERRFILE "$TEMPDIR/$NODEIP.${1//.txt/}.errorlog" 2>/dev/null + return 0 +} + +# arg1: line with comment sign, tabs and so on +filter_input() { + sed -e 's/#.*$//' -e '/^$/d' <<< "$1" | tr -d '\n' | tr -d '\t' | tr -d '\r' +} + +# Dl's any URL (arg1) via HTTP 1.1 GET from port 80, arg2: file to store http body. +# Proxy is not honored yet (see cmd line switches) -- except when using curl or wget. +# There the environment variable is used automatically +# Currently it is being used by check_revocation_crl() only. +http_get() { + local proto z + local node="" query="" + local dl="$2" + local useragent="$UA_STD" + local jsonID="http_get" + + "$SNEAKY" && useragent="$UA_SNEAKY" + + if type -p curl &>/dev/null; then + if [[ -z "$PROXY" ]]; then + curl -s --noproxy '*' -A $''"$useragent"'' -o $dl "$1" + else + # for the sake of simplicity assume the proxy is using http + curl -s -x $PROXYIP:$PROXYPORT -A $''"$useragent"'' -o $dl "$1" + fi + return $? + elif type -p wget &>/dev/null; then + # wget has no proxy command line. We need to use http_proxy instead. 
And for the sake of simplicity + # assume the GET protocol we query is using http -- http_proxy is the $ENV not for the connection TO + # the proxy, but for the protocol we query THROUGH the proxy + if [[ -z "$PROXY" ]]; then + wget --no-proxy -q -U $''"$useragent"'' -O $dl "$1" + else + if [[ -z "$http_proxy" ]]; then + http_proxy=http://$PROXYIP:$PROXYPORT wget -q -U $''"$useragent"'' -O $dl "$1" + else + wget -q -U $''"$useragent"'' -O $dl "$1" + fi + fi + return $? + else + # Worst option: slower and hiccups with chunked transfers. Workaround for the + # latter is using HTTP/1.0. We do not support https here, yet. + # First the URL will be split + IFS=/ read -r proto z node query <<< "$1" + proto=${proto%:} + if [[ "$proto" != http ]]; then + pr_warning "protocol $proto not supported yet" + fileout "$jsonID" "DEBUG" "protocol $proto not supported yet" + return 6 + fi + if [[ -n $PROXY ]]; then + # PROXYNODE works better than PROXYIP on modern versions of squid. \ + # We don't reuse the code in fd_socket() as there's initial CONNECT which makes problems + if ! exec 33<> /dev/tcp/${PROXYNODE}/${PROXYPORT}; then + outln + pr_warning "$PROG_NAME: unable to open a socket to proxy $PROXYNODE:$PROXYPORT" + fileout "$jsonID" "DEBUG" "$PROG_NAME: unable to open a socket to proxy $PROXYNODE:$PROXYPORT" + return 6 + else + printf -- "%b" "GET $proto://$node/$query HTTP/1.0\r\nUser-Agent: $useragent\r\nHost: $node\r\nAccept: */*\r\n\r\n" >&33 + fi + else + IFS=/ read -r proto z node query <<< "$1" + exec 33<>/dev/tcp/$node/80 + printf -- "%b" "GET /$query HTTP/1.0\r\nUser-Agent: $useragent\r\nHost: $node\r\nAccept: */*\r\n\r\n" >&33 + fi + # Strip HTTP header. When in Debug Mode we leave the raw data in place + if [[ $DEBUG -ge 1 ]]; then + cat <&33 >${dl}.raw + cat ${dl}.raw | sed '1,/^[[:space:]]*$/d' >${dl} + else + cat <&33 | sed '1,/^[[:space:]]*$/d' >${dl} + fi + exec 33<&- + exec 33>&- + [[ -s "$dl" ]] && return 0 || return 1 + fi +} + +# Outputs the headers when downloading any URL (arg1) via HTTP 1.1 GET from port 80. +# Only works if curl or wget is available. +# There the environment variable is used automatically +# Currently it is being used by check_pwnedkeys() only. +http_get_header() { + local proto z + local node="" query="" + local dl="$2" + local useragent="$UA_STD" + local jsonID="http_get_header" + local headers + local -i ret + + "$SNEAKY" && useragent="$UA_SNEAKY" + + if type -p curl &>/dev/null; then + if [[ -z "$PROXY" ]]; then + headers="$(curl --head -s --noproxy '*' -A $''"$useragent"'' "$1")" + else + # for the sake of simplicity assume the proxy is using http + headers="$(curl --head -s -x $PROXYIP:$PROXYPORT -A $''"$useragent"'' "$1")" + fi + ret=$? + [[ $ret -eq 0 ]] && tm_out "$headers" + return $ret + elif type -p wget &>/dev/null; then + # wget has no proxy command line. We need to use http_proxy instead. And for the sake of simplicity + # assume the GET protocol we query is using http -- http_proxy is the $ENV not for the connection TO + # the proxy, but for the protocol we query THROUGH the proxy + if [[ -z "$PROXY" ]]; then + headers="$(wget --no-proxy -q -S -U $''"$useragent"'' -O /dev/null "$1" 2>&1)" + else + if [[ -z "$http_proxy" ]]; then + headers="$(http_proxy=http://$PROXYIP:$PROXYPORT wget -q -S -U $''"$useragent"'' -O /dev/null "$1" 2>&1)" + else + headers="$(wget -q -S -U $''"$useragent"'' -O /dev/null "$1" 2>&1)" + fi + fi + ret=$? + [[ $ret -eq 0 ]] && tm_out "$headers" + # wget(1): "8: Server issued an error response.". Happens e.g. 
when 404 is returned. However also if the call wasn't correct (400) + # So we assume for now that everything is submitted correctly. We parse the error code too later + [[ $ret -eq 8 ]] && ret=0 && tm_out "$headers" + return $ret + else + return 1 + fi +} + +ldap_get() { + local ldif + local -i success + local crl="$1" + local tmpfile="$2" + local jsonID="$3" + + if type -p curl &>/dev/null; then + # proxy handling? + ldif="$(curl -s "$crl")" + [[ $? -eq 0 ]] || return 1 + awk '/certificateRevocationList/ { print $2 }' <<< "$ldif" | $OPENSSL base64 -d -A -out "$tmpfile" 2>/dev/null + [[ -s "$tmpfile" ]] || return 1 + return 0 + else + pr_litecyan " (for LDAP CRL check install \"curl\")" + fileout "$jsonID" "INFO" "LDAP CRL revocation check needs \"curl\"" + return 2 + fi +} + +# checks whether the public key in arg1 appears in the https://pwnedkeys.com/ database. +# arg1: file containing certificate +# arg2: public key algorithm +# arg3 key size +# Responses are as follows: +# 0 - not checked +# 1 - key not found in database +# 2 - key found in database +# 7 - network/proxy failure +check_pwnedkeys() { + local cert="$1" + local cert_key_algo="$2" + local -i cert_keysize="$3" + local pubkey curve response + + "$PHONE_OUT" || return 0 + + # https://pwnedkeys.com only keeps records on 1024 bit and larger RSA keys, + # as well as elliptic-curve keys on the P-256, P-384, and P-521 curves. + if [[ "$cert_key_algo" =~ RSA ]] || [[ "$cert_key_algo" =~ rsa ]]; then + [[ $cert_keysize -ge 1024 ]] || return 0 + elif [[ "$cert_key_algo" =~ ecdsa ]] || [[ "$cert_key_algo" == *ecPublicKey ]]; then + [[ $cert_keysize -eq 256 ]] || [[ $cert_keysize -eq 384 ]] || \ + [[ $cert_keysize -eq 521 ]] || return 0 + else + return 0 + fi + + pubkey="$($OPENSSL x509 -in "$cert" -pubkey -noout 2>/dev/null)" + # If it is an elliptic curve key, check that it is P-256, P-384, or P-521. + if [[ "$cert_key_algo" =~ ecdsa ]] || [[ "$cert_key_algo" == *ecPublicKey ]]; then + curve="$($OPENSSL ec -pubin -text <<< "$pubkey" 2>/dev/null)" + curve="${curve#*ASN1 OID: }" + [[ "$curve" == prime256v1* ]] || [[ "$curve" == secp384r1* ]] || \ + [[ "$curve" == secp521r1* ]] || return 0 + fi + fingerprint="$($OPENSSL pkey -pubin -outform DER <<< "$pubkey" 2>/dev/null | $OPENSSL dgst -sha256 -hex 2>/dev/null)" + fingerprint="${fingerprint#*= }" + response="$(http_get_header "https://v1.pwnedkeys.com/$fingerprint")" + # Handle curl's/wget's connectivity exit codes + case $? in + 4|5|7) return 7 ;; + 1|2|3|6) return 0 ;; + # unknown codes we just say "not checked" + esac + if [[ "$response" =~ "404 Not Found" ]]; then + return 1 + elif [[ "$response" =~ "200 OK" ]]; then + return 2 + else + return 0 + fi +} + +check_revocation_crl() { + local crl="$1" + local jsonID="$2" + local tmpfile="" + local scheme retcode + local -i success + + "$PHONE_OUT" || return 0 + [[ -n "$GOOD_CA_BUNDLE" ]] || return 0 + scheme="$(tolower "${crl%%://*}")" + # The code for obtaining CRLs only supports LDAP, HTTP, and HTTPS URLs. + [[ "$scheme" == http ]] || [[ "$scheme" == https ]] || [[ "$scheme" == ldap ]] || return 0 + tmpfile=$TEMPDIR/${NODE}-${NODEIP}.${crl##*\/} || exit $ERR_FCREATE + if [[ "$scheme" == ldap ]]; then + ldap_get "$crl" "$tmpfile" "$jsonID" + success=$? + else + http_get "$crl" "$tmpfile" + success=$? 
+ fi + if [[ $success -eq 2 ]]; then + return 0 + elif [[ $success -ne 0 ]]; then + out ", " + pr_warning "retrieval of \"$crl\" failed" + fileout "$jsonID" "WARN" "CRL retrieval from $crl failed" + return 1 + fi + # -crl_download could be more elegant but is supported from 1.0.2 onwards only + $OPENSSL crl -inform DER -in "$tmpfile" -outform PEM -out "${tmpfile%%.crl}.pem" &>$ERRFILE + if [[ $? -ne 0 ]]; then + pr_warning "conversion of \"$tmpfile\" failed" + fileout "$jsonID" "WARN" "conversion of CRL to PEM format failed" + return 1 + fi + if grep -q "\-\-\-\-\-BEGIN CERTIFICATE\-\-\-\-\-" $TEMPDIR/intermediatecerts.pem; then + $OPENSSL verify -crl_check -CAfile <(cat $ADDITIONAL_CA_FILES "$GOOD_CA_BUNDLE" "${tmpfile%%.crl}.pem") -untrusted $TEMPDIR/intermediatecerts.pem $HOSTCERT &> "${tmpfile%%.crl}.err" + else + $OPENSSL verify -crl_check -CAfile <(cat $ADDITIONAL_CA_FILES "$GOOD_CA_BUNDLE" "${tmpfile%%.crl}.pem") $HOSTCERT &> "${tmpfile%%.crl}.err" + fi + if [[ $? -eq 0 ]]; then + out ", " + pr_svrty_good "not revoked" + fileout "$jsonID" "OK" "not revoked" + else + retcode=$(awk '/error [1-9][0-9]? at [0-9]+ depth lookup:/ { if (!found) {print $2; found=1} }' "${tmpfile%%.crl}.err") + if [[ "$retcode" == 23 ]]; then # see verify_retcode_helper() + out ", " + pr_svrty_critical "revoked" + fileout "$jsonID" "CRITICAL" "revoked" + else + retcode="$(verify_retcode_helper "$retcode")" + out " $retcode" + retcode="${retcode#(}" + retcode="${retcode%)}" + fileout "$jsonID" "WARN" "$retcode" + if [[ $DEBUG -ge 2 ]]; then + outln + cat "${tmpfile%%.crl}.err" + fi + fi + fi + return 0 +} + +check_revocation_ocsp() { + local uri="$1" + local stapled_response="$2" + local jsonID="$3" + local tmpfile="" + local -i success + local response="" + local host_header="" + + "$PHONE_OUT" || [[ -n "$stapled_response" ]] || return 0 + [[ -n "$GOOD_CA_BUNDLE" ]] || return 0 + if [[ -n "$PROXY" ]] && ! "$IGN_OCSP_PROXY"; then + # see #1106 and https://github.com/openssl/openssl/issues/6965 + out ", " + pr_warning "revocation not tested as \"openssl ocsp\" doesn't support a proxy" + fileout "$jsonID" "WARN" "Revocation not tested as openssl ocsp doesn't support a proxy" + return 0 + fi + grep -q "\-\-\-\-\-BEGIN CERTIFICATE\-\-\-\-\-" $TEMPDIR/intermediatecerts.pem || return 0 + tmpfile=$TEMPDIR/${NODE}-${NODEIP}.${uri##*\/} || exit $ERR_FCREATE + if [[ -n "$stapled_response" ]]; then + asciihex_to_binary "$stapled_response" > "$TEMPDIR/stapled_ocsp_response.dd" + $OPENSSL ocsp -no_nonce -respin "$TEMPDIR/stapled_ocsp_response.dd" \ + -issuer $TEMPDIR/hostcert_issuer.pem -verify_other $TEMPDIR/intermediatecerts.pem \ + -CAfile <(cat $ADDITIONAL_CA_FILES "$GOOD_CA_BUNDLE") -cert $HOSTCERT -text &> "$tmpfile" + else + host_header=${uri##http://} + host_header=${host_header%%/*} + if [[ "$OSSL_NAME" =~ LibreSSL ]]; then + host_header="-header Host ${host_header}" + elif [[ $OSSL_VER_MAJOR.$OSSL_VER_MINOR == 1.1.0* ]] || [[ $OSSL_VER_MAJOR.$OSSL_VER_MINOR == 1.1.1* ]] || \ + [[ $OSSL_VER_MAJOR.$OSSL_VER_MINOR == 3.0.0* ]]; then + host_header="-header Host=${host_header}" + else + host_header="-header Host ${host_header}" + fi + $OPENSSL ocsp -no_nonce ${host_header} -url "$uri" \ + -issuer $TEMPDIR/hostcert_issuer.pem -verify_other $TEMPDIR/intermediatecerts.pem \ + -CAfile <(cat $ADDITIONAL_CA_FILES "$GOOD_CA_BUNDLE") -cert $HOSTCERT -text &> "$tmpfile" + fi + if [[ $? 
-eq 0 ]] && grep -Fq "Response verify OK" "$tmpfile"; then + response="$(grep -F "$HOSTCERT: " "$tmpfile")" + response="${response#$HOSTCERT: }" + response="${response%\.}" + if [[ "$response" =~ "good" ]]; then + out ", " + pr_svrty_good "not revoked" + fileout "$jsonID" "OK" "not revoked" + elif [[ "$response" =~ "revoked" ]]; then + out ", " + pr_svrty_critical "revoked" + fileout "$jsonID" "CRITICAL" "revoked" + else + out ", " + pr_warning "error querying OCSP responder" + fileout "$jsonID" "WARN" "$response" + if [[ $DEBUG -ge 2 ]]; then + outln + cat "$tmpfile" + else + out " ($response)" + fi + fi + else + [[ -s "$tmpfile" ]] || response="empty ocsp response" + [[ -z "$response" ]] && response="$(awk '/Responder Error:/ { print $3 }' "$tmpfile")" + [[ -z "$response" ]] && grep -Fq "Response Verify Failure" "$tmpfile" && response="unable to verify response" + [[ -z "$response" ]] && response="$(awk -F':' '/Code/ { print $NF }' $tmpfile)" + out ", " + pr_warning "error querying OCSP responder" + fileout "$jsonID" "WARN" "$response" + if [[ $DEBUG -ge 2 ]]; then + outln + [[ -s "$tmpfile" ]] && cat "$tmpfile" || echo "empty ocsp response" + elif [[ -n "$response" ]]; then + out " ($response)" + fi + fi +} + +wait_kill(){ + local pid=$1 # pid we wait for or kill + local maxsleep=$2 # how long we wait before killing + + HAD_SLEPT=0 + while true; do + if ! ps $pid >/dev/null ; then + return 0 # process terminated before didn't reach $maxsleep + fi + [[ "$DEBUG" -ge 6 ]] && ps $pid + sleep 1 + maxsleep=$((maxsleep - 1)) + HAD_SLEPT=$((HAD_SLEPT + 1)) + test $maxsleep -le 0 && break + done # needs to be killed: + kill $pid >&2 2>/dev/null + wait $pid 2>/dev/null # make sure pid terminated, see wait(1p) + return 3 # means killed +} + +# parse_date date format input-format +if "$HAS_GNUDATE"; then # Linux and NetBSD + parse_date() { + LC_ALL=C date -d "$1" "$2" + } +elif "$HAS_FREEBSDDATE"; then # FreeBSD, OS X and newer (~6.6) OpenBSD versions + parse_date() { + LC_ALL=C TZ=GMT date -j -f "$3" "$2" "$1" + } +elif "$HAS_OPENBSDDATE"; then +# We bascially echo it as a conversion as we want it is too difficult. Approach for that would be: +# printf '%s\n' "$1" | awk '{ printf "%04d%02d%02d\n", $4, $2, (index("JanFebMarAprMayJunJulAugSepOctNovDec",$1)+2)/3}' +# 4: year, 1: month, 2: day, $3: time (e.g. 
"Dec 8 10:16:13 2016") +# This way we could also kind of convert args to epoch but as newer OpenBSDs "date" behave like FreeBSD + parse_date() { + local tmp="" + if [[ $2 == +%s* ]]; then + echo "${1// GMT}" + else + tmp="$(printf '%s\n' "$1" | awk '{ printf "%04d-%02d-%02d %08s\n", $4, (index("JanFebMarAprMayJunJulAugSepOctNovDec",$1)+2)/3, $2, $3 }')" + echo "${tmp%:*}" # remove seconds, result now is in line with GNU date 2016-12-08 10:16 + fi + } +else + parse_date() { + LC_ALL=C date -j "$2" "$1" + } +fi + +# arg1: An ASCII-HEX string +# Print $arg1 in binary format +asciihex_to_binary() { + local string="$1" + local -i len + local -i i ip2 ip4 ip6 ip8 ip10 ip12 ip14 + local -i remainder + + len=${#string} + [[ $len%2 -ne 0 ]] && return 1 + + for (( i=0; i <= len-16 ; i=i+16 )); do + ip2=$((i+2)); ip4=$((i+4)); ip6=$((i+6)); ip8=$((i+8)); ip10=$((i+10)); ip12=$((i+12)); ip14=$((i+14)) + printf -- "\x${string:i:2}\x${string:ip2:2}\x${string:ip4:2}\x${string:ip6:2}\x${string:ip8:2}\x${string:ip10:2}\x${string:ip12:2}\x${string:ip14:2}" + done + + ip2=$((i+2)); ip4=$((i+4)); ip6=$((i+6)); ip8=$((i+8)); ip10=$((i+10)); ip12=$((i+12)); ip14=$((i+14)) + remainder=$len-$i + case $remainder in + 2) printf -- "\x${string:i:2}" ;; + 4) printf -- "\x${string:i:2}\x${string:ip2:2}" ;; + 6) printf -- "\x${string:i:2}\x${string:ip2:2}\x${string:ip4:2}" ;; + 8) printf -- "\x${string:i:2}\x${string:ip2:2}\x${string:ip4:2}\x${string:ip6:2}" ;; + 10) printf -- "\x${string:i:2}\x${string:ip2:2}\x${string:ip4:2}\x${string:ip6:2}\x${string:ip8:2}" ;; + 12) printf -- "\x${string:i:2}\x${string:ip2:2}\x${string:ip4:2}\x${string:ip6:2}\x${string:ip8:2}\x${string:ip10:2}" ;; + 14) printf -- "\x${string:i:2}\x${string:ip2:2}\x${string:ip4:2}\x${string:ip6:2}\x${string:ip8:2}\x${string:ip10:2}\x${string:ip12:2}" ;; + esac + return 0 +} + +# arg1: text string +# Output a comma-separated ASCII-HEX string representation of the input string. +string_to_asciihex() { + local string="$1" + local -i i eos + local output="" + + eos=${#string}-1 + for (( i=0; i$TMPFILE 2>$ERRFILE & + wait_kill $! $HEADER_MAXSLEEP + was_killed=$? 
+ head $TMPFILE | grep -aq '^HTTP\/' && SERVICE=HTTP + [[ -z "$SERVICE" ]] && head $TMPFILE | grep -waq "SMTP|ESMTP|Exim|IdeaSmtpServer|Kerio Connect|Postfix" && SERVICE=SMTP # I know some overlap here + [[ -z "$SERVICE" ]] && head $TMPFILE | grep -Ewaq "POP|Gpop|MailEnable POP3 Server|OK Dovecot|Cyrus POP3" && SERVICE=POP # I know some overlap here + [[ -z "$SERVICE" ]] && head $TMPFILE | grep -Ewaq "IMAP|IMAP4|Cyrus IMAP4IMAP4rev1|IMAP4REV1|Gimap" && SERVICE=IMAP # I know some overlap here + [[ -z "$SERVICE" ]] && head $TMPFILE | grep -aq FTP && SERVICE=FTP + [[ -z "$SERVICE" ]] && head $TMPFILE | grep -Eaqi "jabber|xmpp" && SERVICE=XMPP + [[ -z "$SERVICE" ]] && head $TMPFILE | grep -Eaqw "Jive News|InterNetNews|NNRP|INN|Kerio Connect|NNTP Service|Kerio MailServer|NNTP server" && SERVICE=NNTP + # MongoDB port 27017 will respond to a GET request with a mocked HTTP response + [[ "$SERVICE" == HTTP ]] && head $TMPFILE | grep -Eaqw "MongoDB" && SERVICE=MongoDB + debugme head -50 $TMPFILE | sed -e '//,$d' -e '//,$d' -e '/ skipping all HTTP checks" + echo "certificate-based authentication => skipping all HTTP checks" >$TMPFILE + fileout "${jsonID}" "INFO" "certificate-based authentication => skipping all HTTP checks" + else + out " Couldn't determine what's running on port $PORT" + if "$ASSUME_HTTP"; then + SERVICE=HTTP + out " -- ASSUME_HTTP set though" + fileout "${jsonID}" "DEBUG" "Couldn't determine service -- ASSUME_HTTP set" + else + out ", assuming no HTTP service => skipping all HTTP checks" + fileout "${jsonID}" "DEBUG" "Couldn't determine service, skipping all HTTP checks" + fi + fi + ;; + esac + + outln "\n" + tmpfile_handle ${FUNCNAME[0]}.txt + return 0 +} + +# 1: counter variable +# 2: threshold for this variable +# 3: string for first occurrence of problem +# 4: string for repeated occurrence of problem +# +connectivity_problem() { + if [[ $1 -lt $2 ]]; then + if "$TLS13_ONLY" && ! "$HAS_TLS13"; then + : + else + prln_warning " Oops: $3" + fi + return 0 + fi + if [[ $1 -ge $2 ]]; then + if [[ "$4" =~ openssl\ s_client\ connect ]] ; then + fatal "$4" $ERR_CONNECT "Consider increasing MAX_OSSL_FAIL (currently: $2)" + elif [[ "$4" =~ repeated\ TCP\ connect ]]; then + fatal "$4" $ERR_CONNECT "Consider increasing MAX_SOCKET_FAIL (currently: $2)" + fi + fatal "$4" $ERR_CONNECT + fi +} + + +#problems not handled: chunked +run_http_header() { + local header + local referer useragent + local url redirect + local jsonID="HTTP_status_code" + local spaces=" " + + HEADERFILE=$TEMPDIR/$NODEIP.http_header.txt + if [[ $NR_HEADER_FAIL -eq 0 ]]; then + # skip repeating this line if it's 2nd, 3rd,.. try + outln; pr_headlineln " Testing HTTP header response @ \"$URL_PATH\" " + outln + fi + if [[ $NR_HEADER_FAIL -ge $MAX_HEADER_FAIL ]]; then + # signal to caller we have a problem + return 1 + fi + + pr_bold " HTTP Status Code " + [[ -z "$1" ]] && url="/" || url="$1" + printf "$GET_REQ11" | $OPENSSL s_client $(s_client_options "$OPTIMAL_PROTO $BUGS -quiet -ign_eof -connect $NODEIP:$PORT $PROXY $SNI") >$HEADERFILE 2>$ERRFILE & + wait_kill $! $HEADER_MAXSLEEP + if [[ $? -eq 0 ]]; then + # Issue HTTP GET again as it properly finished within $HEADER_MAXSLEEP and didn't hang. 
+ # Doing it again in the foreground to get an accurate header time + printf "$GET_REQ11" | $OPENSSL s_client $(s_client_options "$OPTIMAL_PROTO $BUGS -quiet -ign_eof -connect $NODEIP:$PORT $PROXY $SNI") >$HEADERFILE 2>$ERRFILE + NOW_TIME=$(date "+%s") + HTTP_TIME=$(awk -F': ' '/^date:/ { print $2 } /^Date:/ { print $2 }' $HEADERFILE) + HAD_SLEPT=0 + else + # 1st GET request hung and needed to be killed. Check whether it succeeded anyway: + if grep -Eiaq "XML|HTML|DOCTYPE|HTTP|Connection" $HEADERFILE; then + # correct by seconds we slept, HAD_SLEPT comes from wait_kill() + NOW_TIME=$(($(date "+%s") - HAD_SLEPT)) + HTTP_TIME=$(awk -F': ' '/^date:/ { print $2 } /^Date:/ { print $2 }' $HEADERFILE) + else + prln_warning " likely HTTP header requests failed (#lines: $(wc -l $HEADERFILE | awk '{ print $1 }'))" + [[ "$DEBUG" -lt 1 ]] && outln "Rerun with DEBUG>=1 and inspect $HEADERFILE\n" + fileout "HTTP_status_code" "WARN" "HTTP header request failed" + debugme cat $HEADERFILE + ((NR_HEADER_FAIL++)) + fi + fi + if [[ ! -s $HEADERFILE ]]; then + ((NR_HEADER_FAIL++)) + if [[ $NR_HEADER_FAIL -ge $MAX_HEADER_FAIL ]]; then + # Now, try to give a hint whether it would make sense to try with OpenSSL 1.1.0 or 1.1.1 instead + if [[ $CURVES_OFFERED == X448 ]] && ! "$HAS_X448" ; then + generic_nonfatal "HTTP header was repeatedly zero due to missing X448 curve." "${spaces}OpenSSL 1.1.1 might help. Skipping complete HTTP header section." + elif [[ $CURVES_OFFERED == X25519 ]] && ! "$HAS_X25519" ; then + generic_nonfatal "HTTP header was repeatedly zero due to missing X25519 curve." "${spaces}OpenSSL 1.1.0 might help. Skipping complete HTTP header section." + elif [[ $CURVES_OFFERED =~ X25519 ]] && [[ $CURVES_OFFERED =~ X448 ]] && ! "$HAS_X25519" && ! "$HAS_X448"; then + generic_nonfatal "HTTP header was repeatedly zero due to missing X25519/X448 curves." "${spaces}OpenSSL >=1.1.0 might help. Skipping complete HTTP header section." + else + # we could give more hints but these are the most likely cases + generic_nonfatal "HTTP header was repeatedly zero." "Skipping complete HTTP header section." + fi + KNOWN_OSSL_PROB=true + return 1 + else + pr_warning "HTTP header reply empty. " + fileout "$jsonID" "WARN" "HTTP header reply empty" + fi + fi + + # Populate vars for HTTP time + debugme echo "NOW_TIME: $NOW_TIME | HTTP_TIME: $HTTP_TIME" + + # Quit on first empty line to catch 98% of the cases. Next pattern is there because the SEDs tested + # so far seem not to be fine with header containing x0d x0a (CRLF) which is the usal case. 
+ # So we also trigger also on any sign on a single line which is not alphanumeric (plus _) + sed -e '/^$/q' -e '/^[^a-zA-Z_0-9]$/q' $HEADERFILE >$HEADERFILE.tmp + # Now to be more sure we delete from '<' or '{' maybe with a leading blank until the end + sed -e '/^ *<.*$/d' -e '/^ *{.*$/d' $HEADERFILE.tmp >$HEADERFILE + debugme echo -e "---\n $(< $HEADERFILE) \n---" + + HTTP_STATUS_CODE=$(awk '/^HTTP\// { print $2 }' $HEADERFILE 2>>$ERRFILE) + msg_thereafter=$(awk -F"$HTTP_STATUS_CODE" '/^HTTP\// { print $2 }' $HEADERFILE 2>>$ERRFILE) # dirty trick to use the status code as a + msg_thereafter=$(strip_lf "$msg_thereafter") # field separator, otherwise we need a loop with awk + debugme echo "Status/MSG: $HTTP_STATUS_CODE $msg_thereafter" + + [[ -n "$HTTP_STATUS_CODE" ]] && out " $HTTP_STATUS_CODE$msg_thereafter" + case $HTTP_STATUS_CODE in + 301|302|307|308) + redirect=$(grep -a '^Location' $HEADERFILE | sed 's/Location: //' | tr -d '\r\n') + out ", redirecting to \""; pr_url "$redirect"; out "\"" + if [[ $redirect =~ http:// ]]; then + pr_svrty_high " -- Redirect to insecure URL (NOT ok)" + fileout "insecure_redirect" "HIGH" "Redirect to insecure URL: \"$redirect\"" + fi + fileout "$jsonID" "INFO" "$HTTP_STATUS_CODE$msg_thereafter (\"$URL_PATH\")" + ;; + 200|204|403|405) + fileout "$jsonID" "INFO" "$HTTP_STATUS_CODE$msg_thereafter (\"$URL_PATH\")" + ;; + 206) + out " -- WHAT?" + fileout "$jsonID" "INFO" "$HTTP_STATUS_CODE$msg_thereafter (\"$URL_PATH\") -- WHAT?" + # partial content shouldn't happen + ;; + 400) + pr_cyan " (Hint: better try another URL)" + fileout "$jsonID" "INFO" "$HTTP_STATUS_CODE$msg_thereafter (\"$URL_PATH\") -- better try another URL" + ;; + 401) + grep -aq "^WWW-Authenticate" $HEADERFILE && out " "; out "$(strip_lf "$(grep -a "^WWW-Authenticate" $HEADERFILE)")" + fileout "$jsonID" "INFO" "$HTTP_STATUS_CODE$msg_thereafter (\"$URL_PATH\") -- $(grep -a "^WWW-Authenticate" $HEADERFILE)" + ;; + 404) + out " (Hint: supply a path which doesn't give a \"$HTTP_STATUS_CODE$msg_thereafter\")" + fileout "$jsonID" "INFO" "$HTTP_STATUS_CODE$msg_thereafter (\"$URL_PATH\")" + ;; + "") + prln_warning "No HTTP status code." + fileout "$jsonID" "WARN" "No HTTP status code" + return 1 + ;; + *) + pr_warning ". Oh, didn't expect \"$HTTP_STATUS_CODE$msg_thereafter\"" + fileout "$jsonID" "WARN" "Unexpected $HTTP_STATUS_CODE$msg_thereafter @ \"$URL_PATH\"" + ;; + esac + outln + + # we don't call "tmpfile_handle ${FUNCNAME[0]}.txt" as we need the header file in other functions! + return 0 +} + +# Borrowed from Glenn Jackman, see https://unix.stackexchange.com/users/4667/glenn-jackman +# +match_ipv4_httpheader() { + local octet="(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])" + local ipv4address="$octet\\.$octet\\.$octet\\.$octet" + local whitelisted_header="pagespeed|page-speed|^Content-Security-Policy|^MicrosoftSharePointTeamServices|^X-OWA-Version|^Location|^Server: PRTG" + local your_ip_msg="(check if it's your IP address or e.g. a cluster IP)" + local result + local first=true + local spaces=" " + local count + local jsonID="ipv4_in_header" + local cwe="CWE-212" + local cve="" + + if [[ ! -s $HEADERFILE ]]; then + run_http_header "$1" || return 1 + fi + + # Whitelist some headers as they are mistakenly identified as ipv4 address. Issues #158, #323. 
+ # Also facebook used to have a CSP rule for 127.0.0.1 + if grep -Evai "$whitelisted_header" $HEADERFILE | grep -Eiq "$ipv4address"; then + pr_bold " IPv4 address in header " + count=0 + while read line; do + result="$(grep -E "$ipv4address" <<< "$line")" + result=$(strip_lf "$result") + if [[ -n "$result" ]]; then + if ! $first; then + out "$spaces" + your_ip_msg="" + else + first=false + fi + pr_svrty_medium "$result" + outln "\n$spaces$your_ip_msg" + fileout "$jsonID" "MEDIUM" "$result $your_ip_msg" "$cve" "$cwe" + fi + count=$count+1 + done < $HEADERFILE + fi +} + + +run_http_date() { + local difftime + local spaces=" " + jsonID="HTTP_clock_skew" + + if [[ $SERVICE != HTTP ]] || "$CLIENT_AUTH"; then + return 0 + fi + if [[ ! -s $HEADERFILE ]]; then + run_http_header "$1" || return 1 + fi + pr_bold " HTTP clock skew " + if [[ -n "$HTTP_TIME" ]]; then + HTTP_TIME="$(strip_lf "$HTTP_TIME")" + if "$HAS_OPENBSDDATE"; then + # We won't normalize the date under an OpenBSD thus no subtraction is feasible + outln "remote: $HTTP_TIME" + out "${spaces}local: $(LC_ALL=C TZ=GMT date "+%a, %d %b %Y %T %Z")" + fileout "$jsonID" "INFO" "$HTTP_TIME - $(TZ=GMT date "+%a, %d %b %Y %T %Z")" + else + HTTP_TIME="$(parse_date "$HTTP_TIME" "+%s" "%a, %d %b %Y %T %Z" 2>>$ERRFILE)" + difftime=$((HTTP_TIME - NOW_TIME)) + [[ $difftime != "-"* ]] && [[ $difftime != "0" ]] && difftime="+$difftime" + # process was killed, so we need to add an error + [[ $HAD_SLEPT -ne 0 ]] && difftime="$difftime (± 1.5)" + out "$difftime sec from localtime"; + fileout "$jsonID" "INFO" "$difftime seconds from localtime" + fi + else + out "Got no HTTP time, maybe try different URL?"; + fileout "$jsonID" "INFO" "Got no HTTP time, maybe try different URL?" + fi + debugme tm_out ", HTTP_TIME in epoch: $HTTP_TIME" + outln + match_ipv4_httpheader "$1" + return 0 +} + + +# HEADERFILE needs to contain the HTTP header (made sure by invoker) +# arg1: key=word to match +# arg2: hint for fileout() if double header +# arg3: indentation, i.e string w spaces +# arg4: whether we need a CR before "misconfiguration" +# returns: +# 0 if header not found +# 1-n nr of headers found, then in HEADERVALUE the first value from key +# +match_httpheader_key() { + local key="$1" + local spaces="$3" + local first=$4 + local -i nr=0 + + nr=$(grep -Eaic "^ *$key:" $HEADERFILE) + if [[ $nr -eq 0 ]]; then + HEADERVALUE="" + return 0 + elif [[ $nr -eq 1 ]]; then + HEADERVALUE="$(grep -Eia "^ *$key:" $HEADERFILE)" + HEADERVALUE="${HEADERVALUE#*:}" # remove leading part=key to colon + HEADERVALUE="$(strip_lf "$HEADERVALUE")" + HEADERVALUE="$(strip_leading_space "$HEADERVALUE")" + "$first" || out "$spaces" + return 1 + else + "$first" || out "$spaces" + pr_svrty_medium "misconfiguration: " + pr_italic "$key" + pr_svrty_medium " ${nr}x" + outln " -- checking first one only" + out "$spaces" + HEADERVALUE="$(fgrep -Fai "$key:" $HEADERFILE | head -1)" + HEADERVALUE="${HEADERVALUE#*:}" + HEADERVALUE="$(strip_lf "$HEADERVALUE")" + HEADERVALUE="$(strip_leading_space "$HEADERVALUE")" + [[ $DEBUG -ge 2 ]] && tm_italic "$HEADERVALUE" && tm_out "\n$spaces" + fileout "${2}_multiple" "MEDIUM" "Multiple $2 headers. 
Using first header: $HEADERVALUE" + return $nr + fi +} + +includeSubDomains() { + if grep -aiqw includeSubDomains "$1"; then + pr_svrty_good ", includeSubDomains" + return 0 + else + pr_litecyan ", just this domain" + return 1 + fi +} + +preload() { + if grep -aiqw preload "$1"; then + pr_svrty_good ", preload" + return 0 + else + return 1 + fi +} + + +run_hsts() { + local hsts_age_sec + local hsts_age_days + local spaces=" " + local jsonID="HSTS" + + if [[ ! -s $HEADERFILE ]]; then + run_http_header "$1" || return 1 + fi + pr_bold " Strict Transport Security " + match_httpheader_key "Strict-Transport-Security" "HSTS" "$spaces" "true" + if [[ $? -ne 0 ]]; then + echo "$HEADERVALUE" >$TMPFILE + hsts_age_sec=$(sed -e 's/[^0-9]*//g' <<< $HEADERVALUE) + debugme echo "hsts_age_sec: $hsts_age_sec" + if [[ -n $hsts_age_sec ]]; then + hsts_age_days=$(( hsts_age_sec / 86400)) + else + hsts_age_days=-1 + fi + if [[ $hsts_age_days -eq -1 ]]; then + pr_svrty_medium "misconfiguration: HSTS max-age (recommended > 15552000 seconds = 180 days ) is required but missing" + fileout "${jsonID}_time" "MEDIUM" "misconfiguration, parameter max-age (recommended > 15552000 seconds = 180 days) missing" + elif [[ $hsts_age_sec -eq 0 ]]; then + pr_svrty_low "HSTS max-age is set to 0. HSTS is disabled" + fileout "${jsonID}_time" "LOW" "0. HSTS is disabled" + elif [[ $hsts_age_sec -gt $HSTS_MIN ]]; then + pr_svrty_good "$hsts_age_days days" ; out "=$hsts_age_sec s" + fileout "${jsonID}_time" "OK" "$hsts_age_days days (=$hsts_age_sec seconds) > $HSTS_MIN seconds" + else + pr_svrty_medium "$hsts_age_sec s = $hsts_age_days days is too short ( >=$HSTS_MIN seconds recommended)" + fileout "${jsonID}_time" "MEDIUM" "max-age too short. $hsts_age_days days (=$hsts_age_sec seconds) < $HSTS_MIN seconds" + fi + if includeSubDomains "$TMPFILE"; then + fileout "${jsonID}_subdomains" "OK" "includes subdomains" + else + fileout "${jsonID}_subdomains" "INFO" "only for this domain" + fi + if preload "$TMPFILE"; then + fileout "${jsonID}_preload" "OK" "domain IS marked for preloading" + else + fileout "${jsonID}_preload" "INFO" "domain is NOT marked for preloading" + #FIXME: To be checked against preloading lists, + # e.g. https://dxr.mozilla.org/mozilla-central/source/security/manager/boot/src/nsSTSPreloadList.inc + # https://chromium.googlesource.com/chromium/src/+/master/net/http/transport_security_state_static.json + fi + else + pr_svrty_low "not offered" + fileout "$jsonID" "LOW" "not offered" + fi + outln + + tmpfile_handle ${FUNCNAME[0]}.txt + return 0 +} + + +run_hpkp() { + local -i hpkp_age_sec + local -i hpkp_age_days + local -i hpkp_nr_keys + local hpkp_spki hpkp_spki_hostcert + local -a backup_spki + local spaces=" " + local spaces_indented=" " + local certificate_found=false + local -i i nrsaved + local first_hpkp_header + local spki + local ca_hashes="$TESTSSL_INSTALL_DIR/etc/testssl/ca_hashes.txt" + + if [[ ! -s $HEADERFILE ]]; then + run_http_header "$1" || return 1 + fi + pr_bold " Public Key Pinning " + grep -aiw '^Public-Key-Pins' $HEADERFILE >$TMPFILE # TMPFILE includes report-only + if [[ $? 
-eq 0 ]]; then + if [[ $(grep -aci '^Public-Key-Pins:' $TMPFILE) -gt 1 ]]; then + pr_svrty_medium "Misconfiguration, multiple Public-Key-Pins headers" + outln ", taking first line" + fileout "HPKP_error" "MEDIUM" "multiple Public-Key-Pins in header" + first_hpkp_header="$(grep -ai '^Public-Key-Pins:' $TMPFILE | head -1)" + # we only evaluate the keys here, unless they a not present + out "$spaces " + elif [[ $(grep -aci '^Public-Key-Pins-Report-Only:' $TMPFILE) -gt 1 ]]; then + outln "Multiple HPKP headers (Report-Only), taking first line" + fileout "HPKP_notice" "INFO" "multiple Public-Key-Pins-Report-Only in header" + first_hpkp_header="$(grep -ai '^Public-Key-Pins-Report-Only:' $TMPFILE | head -1)" + out "$spaces " + elif [[ $(grep -Eaci '^Public-Key-Pins:|^Public-Key-Pins-Report-Only:' $TMPFILE) -eq 2 ]]; then + outln "Public-Key-Pins + Public-Key-Pins-Report-Only detected. Continue with first one" + first_hpkp_header="$(grep -ai '^Public-Key-Pins:' $TMPFILE)" + out "$spaces " + elif [[ $(grep -aci '^Public-Key-Pins:' $TMPFILE) -eq 1 ]]; then + first_hpkp_header="$(grep -ai '^Public-Key-Pins:' $TMPFILE)" + else + outln "Public-Key-Pins-Only detected" + first_hpkp_header="$(grep -ai '^Public-Key-Pins-Report-Only:' $TMPFILE)" + out "$spaces " + fileout "HPKP_SPKIs" "INFO" "Only Public-Key-Pins-Report-Only" + fi + + # remove leading Public-Key-Pins* and convert it to multiline arg + sed -e 's/Public-Key-Pins://g' -e s'/Public-Key-Pins-Report-Only://' <<< "$first_hpkp_header" | \ + tr ';' '\n' | sed -e 's/\"//g' -e 's/^ //' >$TMPFILE + + hpkp_nr_keys=$(grep -ac pin-sha $TMPFILE) + if [[ $hpkp_nr_keys -eq 1 ]]; then + pr_svrty_high "Only one key pinned (NOT ok), means the site may become unavailable in the future, " + fileout "HPKP_SPKIs" "HIGH" "Only one key pinned" + else + pr_svrty_good "$hpkp_nr_keys" + out " keys, " + fileout "HPKP_SPKIs" "OK" "$hpkp_nr_keys keys pinned in header" + fi + + # print key=value pair with awk, then strip non-numbers, to be improved with proper parsing of key-value with awk + if "$HAS_SED_E"; then + hpkp_age_sec=$(awk -F= '/max-age/{max_age=$2; print max_age}' $TMPFILE | sed -E 's/[^[:digit:]]//g') + else + hpkp_age_sec=$(awk -F= '/max-age/{max_age=$2; print max_age}' $TMPFILE | sed -r 's/[^[:digit:]]//g') + fi + hpkp_age_days=$((hpkp_age_sec / 86400)) + if [[ $hpkp_age_sec -ge $HPKP_MIN ]]; then + pr_svrty_good "$hpkp_age_days days" ; out "=$hpkp_age_sec s" + fileout "HPKP_age" "OK" "HPKP age is set to $hpkp_age_days days ($hpkp_age_sec sec)" + else + out "$hpkp_age_sec s = " + pr_svrty_medium "$hpkp_age_days days (< $HPKP_MIN s = $((HPKP_MIN / 86400)) days is not good enough)" + fileout "HPKP_age" "MEDIUM" "age is set to $hpkp_age_days days ($hpkp_age_sec sec) < $HPKP_MIN s = $((HPKP_MIN / 86400)) days is not good enough." + fi + + if includeSubDomains "$TMPFILE"; then + fileout "HPKP_subdomains" "INFO" "is valid for subdomains as well" + else + fileout "HPKP_subdomains" "INFO" "is valid for this domain only" + fi + if preload "$TMPFILE"; then + fileout "HPKP_preload" "INFO" "IS marked for browser preloading" + else + fileout "HPKP_preload" "INFO" "NOT marked for browser preloading" + fi + + # Get the SPKIs first + spki=$(tr ';' '\n' < $TMPFILE | tr -d ' ' | tr -d '\"' | awk -F'=' '/pin.*=/ { print $2 }') + debugme tmln_out "\n$spki" + + # Look at the host certificate first + if [[ ! 
-s "$HOSTCERT" ]]; then + get_host_cert || return 1 + # no host certificate + fi + + hpkp_spki_hostcert="$($OPENSSL x509 -in $HOSTCERT -pubkey -noout 2>/dev/null | grep -v PUBLIC | \ + $OPENSSL base64 -d 2>/dev/null | $OPENSSL dgst -sha256 -binary 2>/dev/null | $OPENSSL base64 2>/dev/null)" + hpkp_ca="$($OPENSSL x509 -in $HOSTCERT -issuer -noout 2>/dev/null |sed 's/^.*CN=//' | sed 's/\/.*$//')" + + # Get keys/hashes from intermediate certificates + $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS $PROXY -showcerts -connect $NODEIP:$PORT $SNI") $TMPFILE 2>$ERRFILE + # Place the server's certificate in $HOSTCERT and any intermediate + # certificates that were provided in $TEMPDIR/intermediatecerts.pem + # https://backreference.org/2010/05/09/ocsp-verification-with-openssl/ + awk -v n=-1 "/Certificate chain/ {start=1} + /-----BEGIN CERTIFICATE-----/{ if (start) {inc=1; n++} } + inc { print > (\"$TEMPDIR/level\" n \".crt\") } + /---END CERTIFICATE-----/{ inc=0 }" $TMPFILE + nrsaved=$(count_words "$(echo $TEMPDIR/level?.crt 2>/dev/null)") + rm $TEMPDIR/level0.crt 2>/dev/null + + printf ""> "$TEMPDIR/intermediate.hashes" + if [[ $nrsaved -ge 2 ]]; then + for cert_fname in $TEMPDIR/level?.crt; do + hpkp_spki_ca="$($OPENSSL x509 -in "$cert_fname" -pubkey -noout 2>/dev/null | grep -v PUBLIC | $OPENSSL base64 -d 2>/dev/null | + $OPENSSL dgst -sha256 -binary 2>/dev/null | $OPENSSL enc -base64 2>/dev/null)" + hpkp_name="$(get_cn_from_cert $cert_fname)" + hpkp_ca="$($OPENSSL x509 -in $cert_fname -issuer -noout 2>/dev/null |sed 's/^.*CN=//' | sed 's/\/.*$//')" + [[ -n $hpkp_name ]] || hpkp_name=$($OPENSSL x509 -in "$cert_fname" -subject -noout 2>/dev/null | sed 's/^subject= //') + echo "$hpkp_spki_ca $hpkp_name" >> "$TEMPDIR/intermediate.hashes" + done + fi + + # This is where the matching magic starts. First host, intermediate, then root certificate from the supplied stores + spki_match=false + has_backup_spki=false + i=0 + for hpkp_spki in $spki; do + certificate_found=false + # compare collected SPKIs against the host certificate + if [[ "$hpkp_spki_hostcert" == "$hpkp_spki" ]] || [[ "$hpkp_spki_hostcert" == "$hpkp_spki=" ]]; then + certificate_found=true # We have a match + spki_match=true + out "\n$spaces_indented Host cert: " + pr_svrty_good "$hpkp_spki" + fileout "HPKP_$hpkp_spki" "OK" "SPKI $hpkp_spki matches the host certificate" + fi + debugme tm_out "\n $hpkp_spki | $hpkp_spki_hostcert" + + # Check for intermediate match + if ! "$certificate_found"; then + hpkp_matches=$(grep "$hpkp_spki" $TEMPDIR/intermediate.hashes 2>/dev/null) + if [[ -n $hpkp_matches ]]; then # hpkp_matches + hpkp_spki + '=' + # We have a match + certificate_found=true + spki_match=true + out "\n$spaces_indented Sub CA: " + pr_svrty_good "$hpkp_spki" + ca_cn="$(sed "s/^[a-zA-Z0-9\+\/]*=* *//" <<< $"$hpkp_matches" )" + pr_italic " $ca_cn" + fileout "HPKP_$hpkp_spki" "OK" "SPKI $hpkp_spki matches Intermediate CA \"$ca_cn\" pinned in the HPKP header" + fi + fi + + # we compare now against a precompiled list of SPKIs against the ROOT CAs we have in $ca_hashes + if ! 
"$certificate_found"; then + hpkp_matches=$(grep -h "$hpkp_spki" $ca_hashes 2>/dev/null | sort -u) + if [[ -n $hpkp_matches ]]; then + certificate_found=true # root CA found + spki_match=true + if [[ $(count_lines "$hpkp_matches") -eq 1 ]]; then + # replace by awk + match_ca=$(sed "s/[a-zA-Z0-9\+\/]*=* *//" <<< "$hpkp_matches") + else + match_ca="" + + fi + ca_cn="$(sed "s/^[a-zA-Z0-9\+\/]*=* *//" <<< $"$hpkp_matches" )" + if [[ "$match_ca" == "$hpkp_ca" ]]; then # part of the chain + out "\n$spaces_indented Root CA: " + pr_svrty_good "$hpkp_spki" + pr_italic " $ca_cn" + fileout "HPKP_$hpkp_spki" "INFO" "SPKI $hpkp_spki matches Root CA \"$ca_cn\" pinned. (Root CA part of the chain)" + else # not part of chain + match_ca="" + has_backup_spki=true # Root CA outside the chain --> we save it for unmatched + fileout "HPKP_$hpkp_spki" "INFO" "SPKI $hpkp_spki matches Root CA \"$ca_cn\" pinned. (Root backup SPKI)" + backup_spki[i]="$(strip_lf "$hpkp_spki")" # save it for later + backup_spki_str[i]="$ca_cn" # also the name=CN of the root CA + i=$((i + 1)) + fi + fi + fi + + # still no success --> it's probably a backup SPKI + if ! "$certificate_found"; then + # Most likely a backup SPKI, unfortunately we can't tell for what it is: host, intermediates + has_backup_spki=true + backup_spki[i]="$(strip_lf "$hpkp_spki")" # save it for later + backup_spki_str[i]="" # no root ca + i=$((i + 1)) + fileout "HPKP_$hpkp_spki" "INFO" "SPKI $hpkp_spki doesn't match anything. This is ok for a backup for any certificate" + # CSV/JSON output here for the sake of simplicity, rest we do en bloc below + fi + done + + # now print every backup spki out we saved before + out "\n$spaces_indented Backups: " + + # for i=0 manually do the same as below as there's other indentation here + if [[ -n "${backup_spki_str[0]}" ]]; then + pr_svrty_good "${backup_spki[0]}" + #out " Root CA: " + prln_italic " ${backup_spki_str[0]}" + else + outln "${backup_spki[0]}" + fi + # now for i=1 + for ((i=1; i < ${#backup_spki[@]} ;i++ )); do + if [[ -n "${backup_spki_str[i]}" ]]; then + # it's a Root CA outside the chain + pr_svrty_good "$spaces_indented ${backup_spki[i]}" + #out " Root CA: " + prln_italic " ${backup_spki_str[i]}" + else + outln "$spaces_indented ${backup_spki[i]}" + fi + done + if [[ ! -f "$ca_hashes" ]] && "$spki_match"; then + out "$spaces " + prln_warning "Attribution of further hashes couldn't be done as $ca_hashes could not be found" + fileout "HPKP_SPKImatch" "WARN" "Attribution of further hashes possible as $ca_hashes could not be found" + fi + + # If all else fails... + if ! "$spki_match"; then + "$has_backup_spki" && out "$spaces" # we had a few lines with backup SPKIs already + prln_svrty_high " No matching key for SPKI found " + fileout "HPKP_SPKImatch" "HIGH" "None of the SPKI match your host certificate, intermediate CA or known root CAs. Bricked site?" + fi + + if ! "$has_backup_spki"; then + prln_svrty_high " No backup keys found. Loss/compromise of the currently pinned key(s) will lead to bricked site. " + fileout "HPKP_backup" "HIGH" "No backup keys found. Loss/compromise of the currently pinned key(s) will lead to bricked site." 
+ fi + else + outln "--" + fileout "HPKP" "INFO" "No support for HTTP Public Key Pinning" + fi + + tmpfile_handle ${FUNCNAME[0]}.txt + return 0 +} + +emphasize_stuff_in_headers(){ + local html_brown="" + local html_yellow="" + local html_off="<\\/span>" + +# see https://www.grymoire.com/Unix/Sed.html#uh-3 +# outln "$1" | sed "s/[0-9]*/$brown&${off}/g" + tmln_out "$1" | sed -e "s/\([0-9]\)/${brown}\1${off}/g" \ + -e "s/Unix/${yellow}Unix${off}/g" \ + -e "s/Debian/${yellow}Debian${off}/g" \ + -e "s/Win32/${yellow}Win32${off}/g" \ + -e "s/Win64/${yellow}Win64${off}/g" \ + -e "s/Ubuntu/${yellow}Ubuntu${off}/g" \ + -e "s/ubuntu/${yellow}ubuntu${off}/g" \ + -e "s/buster/${yellow}buster${off}/g" \ + -e "s/stretch/${yellow}stretch${off}/g" \ + -e "s/jessie/${yellow}jessie${off}/g" \ + -e "s/squeeze/${yellow}squeeze${off}/g" \ + -e "s/wheezy/${yellow}wheezy${off}/g" \ + -e "s/lenny/${yellow}lenny${off}/g" \ + -e "s/SUSE/${yellow}SUSE${off}/g" \ + -e "s/Red Hat Enterprise Linux/${yellow}Red Hat Enterprise Linux${off}/g" \ + -e "s/Red Hat/${yellow}Red Hat${off}/g" \ + -e "s/CentOS/${yellow}CentOS${off}/g" \ + -e "s/Via/${yellow}Via${off}/g" \ + -e "s/X-Forwarded/${yellow}X-Forwarded${off}/g" \ + -e "s/Liferay-Portal/${yellow}Liferay-Portal${off}/g" \ + -e "s/X-Cache-Lookup/${yellow}X-Cache-Lookup${off}/g" \ + -e "s/X-Cache/${yellow}X-Cache${off}/g" \ + -e "s/X-Squid/${yellow}X-Squid${off}/g" \ + -e "s/X-Server/${yellow}X-Server${off}/g" \ + -e "s/X-Varnish/${yellow}X-Varnish${off}/g" \ + -e "s/X-OWA-Version/${yellow}X-OWA-Version${off}/g" \ + -e "s/MicrosoftSharePointTeamServices/${yellow}MicrosoftSharePointTeamServices${off}/g" \ + -e "s/X-Application-Context/${yellow}X-Application-Context${off}/g" \ + -e "s/X-Version/${yellow}X-Version${off}/g" \ + -e "s/X-Powered-By/${yellow}X-Powered-By${off}/g" \ + -e "s/X-UA-Compatible/${yellow}X-UA-Compatible${off}/g" \ + -e "s/Link/${yellow}Link${off}/g" \ + -e "s/X-Rack-Cache/${yellow}X-Rack-Cache${off}/g" \ + -e "s/X-Runtime/${yellow}X-Runtime${off}/g" \ + -e "s/X-Pingback/${yellow}X-Pingback${off}/g" \ + -e "s/X-Permitted-Cross-Domain-Policies/${yellow}X-Permitted-Cross-Domain-Policies${off}/g" \ + -e "s/X-AspNet-Version/${yellow}X-AspNet-Version${off}/g" \ + -e "s/x-note/${yellow}x-note${off}/g" \ + -e "s/x-global-transaction-id/${yellow}x-global-transaction-id${off}/g" \ + -e "s/X-Global-Transaction-ID/${yellow}X-Global-Transaction-ID${off}/g" \ + -e "s/Alt-Svc/${yellow}Alt-Svc${off}/g" \ + -e "s/system-wsgw-management-loopback/${yellow}system-wsgw-management-loopback${off}/g" + + if "$do_html"; then + if [[ $COLOR -ge 2 ]]; then + html_out "$(tm_out "$1" | sed -e 's/\&/\&/g' \ + -e 's//\>/g' -e 's/"/\"/g' -e "s/'/\'/g" \ + -e "s/\([0-9]\)/${html_brown}\1${html_off}/g" \ + -e "s/Unix/${html_yellow}Unix${html_off}/g" \ + -e "s/Debian/${html_yellow}Debian${html_off}/g" \ + -e "s/Win32/${html_yellow}Win32${html_off}/g" \ + -e "s/Win64/${html_yellow}Win64${html_off}/g" \ + -e "s/Ubuntu/${html_yellow}Ubuntu${html_off}/g" \ + -e "s/ubuntu/${html_yellow}ubuntu${html_off}/g" \ + -e "s/buster/${html_yellow}buster${html_off}/g" \ + -e "s/stretch/${html_yellow}stretch${html_off}/g" \ + -e "s/jessie/${html_yellow}jessie${html_off}/g" \ + -e "s/squeeze/${html_yellow}squeeze${html_off}/g" \ + -e "s/wheezy/${html_yellow}wheezy${html_off}/g" \ + -e "s/lenny/${html_yellow}lenny${html_off}/g" \ + -e "s/SUSE/${html_yellow}SUSE${html_off}/g" \ + -e "s/Red Hat Enterprise Linux/${html_yellow}Red Hat Enterprise Linux${html_off}/g" \ + -e "s/Red Hat/${html_yellow}Red 
Hat${html_off}/g" \ + -e "s/CentOS/${html_yellow}CentOS${html_off}/g" \ + -e "s/Via/${html_yellow}Via${html_off}/g" \ + -e "s/X-Forwarded/${html_yellow}X-Forwarded${html_off}/g" \ + -e "s/Liferay-Portal/${html_yellow}Liferay-Portal${html_off}/g" \ + -e "s/X-Cache-Lookup/${html_yellow}X-Cache-Lookup${html_off}/g" \ + -e "s/X-Cache/${html_yellow}X-Cache${html_off}/g" \ + -e "s/X-Squid/${html_yellow}X-Squid${html_off}/g" \ + -e "s/X-Server/${html_yellow}X-Server${html_off}/g" \ + -e "s/X-Varnish/${html_yellow}X-Varnish${html_off}/g" \ + -e "s/X-OWA-Version/${html_yellow}X-OWA-Version${html_off}/g" \ + -e "s/MicrosoftSharePointTeamServices/${html_yellow}MicrosoftSharePointTeamServices${html_off}/g" \ + -e "s/X-Application-Context/${html_yellow}X-Application-Context${html_off}/g" \ + -e "s/X-Version/${html_yellow}X-Version${html_off}/g" \ + -e "s/X-Powered-By/${html_yellow}X-Powered-By${html_off}/g" \ + -e "s/X-UA-Compatible/${html_yellow}X-UA-Compatible${html_off}/g" \ + -e "s/Link/${html_yellow}Link${html_off}/g" \ + -e "s/X-Runtime/${html_yellow}X-Runtime${html_off}/g" \ + -e "s/X-Rack-Cache/${html_yellow}X-Rack-Cache${html_off}/g" \ + -e "s/X-Pingback/${html_yellow}X-Pingback${html_off}/g" \ + -e "s/X-Permitted-Cross-Domain-Policies/${html_yellow}X-Permitted-Cross-Domain-Policies${html_off}/g" \ + -e "s/X-AspNet-Version/${html_yellow}X-AspNet-Version${html_off}/g")" \ + -e "s/x-note/${html_yellow}x-note${html_off}/g" \ + -e "s/X-Global-Transaction-ID/${html_yellow}X-Global-Transaction-ID${html_off}/g" \ + -e "s/x-global-transaction-id/${html_yellow}x-global-transaction-id${html_off}/g" \ + -e "s/Alt-Svc/${html_yellow}Alt-Svc${html_off}/g" \ + -e "s/system-wsgw-management-loopback/${html_yellow}system-wsgw-management-loopback${html_off}/g" +#FIXME: this is double code. The pattern to emphasize would fit better into +# one function. +# Also we need another function like run_other_header as otherwise "Link" "Alt-Svc" will never be found. +# And: I matches case sensitive only which might not detect all banners. (sed ignorecase is not possible w/ BSD sed) + else + html_out "$(html_reserved "$1")" + fi + html_out "\n" + fi +} + +run_server_banner() { + local serverbanner + local jsonID="banner_server" + + if [[ ! -s $HEADERFILE ]]; then + run_http_header "$1" || return 1 + fi + pr_bold " Server banner " + grep -ai '^Server' $HEADERFILE >$TMPFILE + if [[ $? -eq 0 ]]; then + serverbanner=$(sed -e 's/^Server: //' -e 's/^server: //' $TMPFILE) + if [[ "$serverbanner" == $'\n' ]] || [[ "$serverbanner" == $'\r' ]] || [[ "$serverbanner" == $'\n\r' ]] || [[ -z "$serverbanner" ]]; then + outln "exists but empty string" + fileout "$jsonID" "INFO" "Server banner is empty" + else + emphasize_stuff_in_headers "$serverbanner" + fileout "$jsonID" "INFO" "$serverbanner" + if [[ "$serverbanner" == *Microsoft-IIS/6.* ]] && [[ $OSSL_VER == 1.0.2* ]]; then + prln_warning " It's recommended to run another test w/ OpenSSL 1.0.1 !" + # see https://github.com/PeterMosmans/openssl/issues/19#issuecomment-100897892 + fileout "${jsonID}" "WARN" "IIS6_openssl_mismatch: Recommended to rerun this test w/ OpenSSL 1.0.1. See https://github.com/PeterMosmans/openssl/issues/19#issuecomment-100897892" + fi + fi + # mozilla.github.io/server-side-tls/ssl-config-generator/ + # https://support.microsoft.com/en-us/kb/245030 + else + outln "(no \"Server\" line in header, interesting!)" + fileout "$jsonID" "INFO" "No Server banner line in header, interesting!" 
+ fi + + tmpfile_handle ${FUNCNAME[0]}.txt + return 0 +} + +run_appl_banner() { + local line + local first=true + local spaces=" " + local appl_banners="" + local jsonID="banner_application" + + if [[ ! -s $HEADERFILE ]]; then + run_http_header "$1" || return 1 + fi + pr_bold " Application banner " + grep -Eai '^X-Powered-By|^X-AspNet-Version|^X-Version|^Liferay-Portal|^X-OWA-Version^|^MicrosoftSharePointTeamServices' $HEADERFILE >$TMPFILE + if [[ $? -ne 0 ]]; then + outln "--" + fileout "$jsonID" "INFO" "No application banner found" + else + while IFS='' read -r line; do + line=$(strip_lf "$line") + if ! $first; then + out "$spaces" + appl_banners="${appl_banners}, ${line}" + else + appl_banners="${line}" + first=false + fi + emphasize_stuff_in_headers "$line" + done < "$TMPFILE" + fileout "$jsonID" "INFO" "$appl_banners" + fi + tmpfile_handle ${FUNCNAME[0]}.txt + return 0 +} + +run_rp_banner() { + local line + local first=true + local spaces=" " + local rp_banners="" + local jsonID="banner_reverseproxy" + local cwe="CWE-200" + local cve="" + + if [[ ! -s $HEADERFILE ]]; then + run_http_header "$1" || return 1 + fi + pr_bold " Reverse Proxy banner " + grep -Eai '^Via:|^X-Cache|^X-Squid|^X-Varnish:|^X-Server-Name:|^X-Server-Port:|^x-forwarded|^Forwarded' $HEADERFILE >$TMPFILE + if [[ $? -ne 0 ]]; then + outln "--" + fileout "$jsonID" "INFO" "--" "$cve" "$cwe" + else + while read line; do + line=$(strip_lf "$line") + if $first; then + first=false + else + out "$spaces" + fi + emphasize_stuff_in_headers "$line" + rp_banners="${rp_banners}${line}" + done < $TMPFILE + fileout "$jsonID" "INFO" "$rp_banners" "$cve" "$cwe" + fi + outln + + tmpfile_handle ${FUNCNAME[0]}.txt + return 0 +} + + +# arg1: multiline string w cookies +# +sub_f5_bigip_check() { + local allcookies="$1" + local ip port cookievalue cookiename + local routed_domain offset + local savedcookies="" + local spaces="$2" + local cwe="CWE-212" + local cve="" + + # taken from https://github.com/drwetter/F5-BIGIP-Decoder, more details see there + + debugme echo -e "all cookies: >> $allcookies <<\n" + while true; do IFS='=' read cookiename cookievalue + [[ -z "$cookievalue" ]] && break + cookievalue=${cookievalue/;/} + debugme echo $cookiename : $cookievalue + if grep -Eq '[0-9]{9,10}\.[0-9]{3,5}\.0000' <<< "$cookievalue"; then + ip="$(f5_ip_oldstyle "$cookievalue")" + port="$(f5_port_decode $cookievalue)" + out "${spaces}F5 cookie (default IPv4 pool member): "; pr_italic "$cookiename "; prln_svrty_medium "${ip}:${port}" + fileout "cookie_bigip_f5" "MEDIUM" "Information leakage: F5 cookie $cookiename $cookievalue is default IPv4 pool member ${ip}:${port}" "$cve" "$cwe" + elif grep -Eq '^rd[0-9]{1,3}o0{20}f{4}[a-f0-9]{8}o[0-9]{1,5}' <<< "$cookievalue"; then + routed_domain="$(f5_determine_routeddomain "$cookievalue")" + offset=$(( 2 + ${#routed_domain} + 1 + 24)) + port="${cookievalue##*o}" + ip="$(f5_hex2ip "${cookievalue:$offset:8}")" + out "${spaces}F5 cookie (IPv4 pool in routed domain "; pr_svrty_medium "$routed_domain"; out "): "; pr_italic "$cookiename "; prln_svrty_medium "${ip}:${port}" + fileout "cookie_bigip_f5" "MEDIUM" "Information leakage: F5 cookie $cookiename $cookievalue is IPv4 pool member in routed domain $routed_domain ${ip}:${port}" "$cve" "$cwe" + elif grep -Eq '^vi[a-f0-9]{32}\.[0-9]{1,5}' <<< "$cookievalue"; then + ip="$(f5_hex2ip6 ${cookievalue:2:32})" + port="${cookievalue##*.}" + port=$(f5_port_decode "$port") + out "${spaces}F5 cookie (default IPv6 pool member): "; pr_italic "$cookiename "; prln_svrty_medium 
"${ip}:${port}" + fileout "cookie_bigip_f5" "MEDIUM" "Information leakage: F5 cookie $cookiename $cookievalue is default IPv6 pool member ${ip}:${port}" "$cve" "$cwe" + elif grep -Eq '^rd[0-9]{1,3}o[a-f0-9]{32}o[0-9]{1,5}' <<< "$cookievalue"; then + routed_domain="$(f5_determine_routeddomain "$cookievalue")" + offset=$(( 2 + ${#routed_domain} + 1 )) + port="${cookievalue##*o}" + ip="$(f5_hex2ip6 ${cookievalue:$offset:32})" + out "${spaces}F5 cookie (IPv6 pool in routed domain "; pr_svrty_medium "$routed_domain"; out "): "; pr_italic "$cookiename "; prln_svrty_medium "${ip}:${port}" + fileout "cookie_bigip_f5" "MEDIUM" "Information leakage: F5 cookie $cookiename $cookievalue is IPv6 pool member in routed domain $routed_domain ${ip}:${port}" "$cve" "$cwe" + elif grep -Eq '^\!.*=$' <<< "$cookievalue"; then + if [[ "${#cookievalue}" -eq 81 ]] ; then + savedcookies="${savedcookies} ${cookiename}=${cookievalue:1:79}" + out "${spaces}Encrypted F5 cookie named "; pr_italic "${cookiename}"; outln " detected" + fileout "cookie_bigip_f5" "INFO" "encrypted F5 cookie named ${cookiename}" + fi + fi + done <<< "$allcookies" +} + + +run_cookie_flags() { # ARG1: Path + local -i nr_cookies + local -i nr_httponly nr_secure + local negative_word + local msg302="" msg302_="" + local spaces=" " + + if [[ ! -s $HEADERFILE ]]; then + run_http_header "$1" || return 1 + fi + + if [[ ! "$HTTP_STATUS_CODE" =~ 20 ]]; then + if [[ "$HTTP_STATUS_CODE" =~ [301|302] ]]; then + msg302=" -- maybe better try target URL of 30x" + msg302_=" (30x detected, better try target URL of 30x)" + else + msg302=" -- HTTP status $HTTP_STATUS_CODE signals you maybe missed the web application" + msg302_=" (maybe missed the application)" + fi + fi + + pr_bold " Cookie(s) " + grep -ai '^Set-Cookie' $HEADERFILE >$TMPFILE + if [[ $? 
-ne 0 ]]; then + outln "(none issued at \"$1\")$msg302" + fileout "cookie_count" "INFO" "0 at \"$1\"$msg302_" + else + nr_cookies=$(count_lines "$(cat $TMPFILE)") + out "$nr_cookies issued: " + fileout "cookie_count" "INFO" "$nr_cookies at \"$1\"$msg302_" + if [[ $nr_cookies -gt 1 ]]; then + negative_word="NONE" + else + negative_word="NOT" + fi + nr_secure=$(grep -iac secure $TMPFILE) + case $nr_secure in + 0) pr_svrty_medium "$negative_word" ;; + [123456789]) pr_svrty_good "$nr_secure/$nr_cookies";; + esac + out " secure, " + if [[ $nr_cookies -eq $nr_secure ]]; then + fileout "cookie_secure" "OK" "All ($nr_cookies) at \"$1\" marked as secure" + else + fileout "cookie_secure" "INFO" "$nr_secure/$nr_cookies at \"$1\" marked as secure" + fi + nr_httponly=$(grep -cai httponly $TMPFILE) + case $nr_httponly in + 0) pr_svrty_medium "$negative_word" ;; + [123456789]) pr_svrty_good "$nr_httponly/$nr_cookies";; + esac + out " HttpOnly" + if [[ $nr_cookies -eq $nr_httponly ]]; then + fileout "cookie_httponly" "OK" "All ($nr_cookies) at \"$1\" marked as HttpOnly$msg302_" + else + fileout "cookie_httponly" "INFO" "$nr_secure/$nr_cookies at \"$1\" marked as HttpOnly$msg302_" + fi + outln "$msg302" + allcookies="$(awk '/[Ss][Ee][Tt]-[Cc][Oo][Oo][Kk][Ii][Ee]:/ { print $2 }' "$TMPFILE")" + sub_f5_bigip_check "$allcookies" "$spaces" + fi + + tmpfile_handle ${FUNCNAME[0]}.txt + return 0 +} + + +run_security_headers() { + local good_header="X-Frame-Options X-XSS-Protection X-Content-Type-Options Content-Security-Policy X-Content-Security-Policy X-WebKit-CSP Content-Security-Policy-Report-Only Expect-CT" + local other_header="Access-Control-Allow-Origin Upgrade X-Served-By Referrer-Policy X-UA-Compatible Cache-Control Pragma" + local header header_output + local first=true + local spaces=" " + local have_header=false + + if [[ ! -s $HEADERFILE ]]; then + run_http_header "$1" || return 1 + fi + + pr_bold " Security headers " + for header in $good_header; do + [[ "$DEBUG" -ge 5 ]] && echo "testing \"$header\"" + match_httpheader_key "$header" "$header" "$spaces" "$first" + if [[ $? -ge 1 ]]; then + have_header=true + if "$first"; then + first=false + fi + # Include $header when determining where to insert line breaks, but print $header + # separately. + pr_svrty_good "$header" + header_output="$(out_row_aligned_max_width "${header:2} $HEADERVALUE" "$spaces " $TERM_WIDTH)" + outln "${header_output#${header:2}}" + fileout "$header" "OK" "$HEADERVALUE" + fi + done + + for header in $other_header; do + [[ "$DEBUG" -ge 5 ]] && echo "testing \"$header\"" + match_httpheader_key "$header" "$header" "$spaces" "$first" + if [[ $? -ge 1 ]]; then + have_header=true + if "$first"; then + first=false + fi + pr_litecyan "$header" + outln " $HEADERVALUE" # shouldn't be that long + fileout "$header" "INFO" "$HEADERVALUE" + fi + done + #TODO: I am not testing for the correctness or anything stupid yet, e.g. "X-Frame-Options: allowall" or Access-Control-Allow-Origin: * + + if ! 
"$have_header"; then + prln_svrty_medium "--" + fileout "security_headers" "MEDIUM" "--" + fi + + tmpfile_handle ${FUNCNAME[0]}.txt + return 0 +} + + +# #1: string with 2 opensssl codes, output is same in NSS/ssllabs terminology +normalize_ciphercode() { + if [[ "${1:2:2}" == "00" ]]; then + tm_out "$(tolower "x${1:7:2}")" + else + tm_out "$(tolower "x${1:2:2}${1:7:2}${1:12:2}")" + fi + return 0 +} + +prettyprint_local() { + local arg line + local hexc hexcode dash ciph sslvers kx auth enc mac export + local re='^[0-9A-Fa-f]+$' + + if [[ "$1" == 0x* ]] || [[ "$1" == 0X* ]]; then + fatal "pls supply x instead" $ERR_CMDLINE + fi + + if [[ -z "$1" ]]; then + pr_headline " Displaying all $OPENSSL_NR_CIPHERS local ciphers "; + else + pr_headline " Displaying all local ciphers "; + # pattern provided; which one? + [[ $1 =~ $re ]] && \ + pr_headline "matching number pattern \"$1\" " || \ + pr_headline "matching word pattern "\"$1\"" (ignore case) " + fi + outln "\n" + neat_header + + if [[ -z "$1" ]]; then + actually_supported_osslciphers 'ALL:COMPLEMENTOFALL:@STRENGTH' 'ALL' "-V" | while read -r hexcode dash ciph sslvers kx auth enc mac export ; do # -V doesn't work with openssl < 1.0 + hexc="$(normalize_ciphercode $hexcode)" + outln "$(neat_list "$hexc" "$ciph" "$kx" "$enc")" + done + else + #for arg in $(echo $@ | sed 's/,/ /g'); do + for arg in ${*//,/ /}; do + actually_supported_osslciphers 'ALL:COMPLEMENTOFALL:@STRENGTH' 'ALL' "-V" | while read -r hexcode dash ciph sslvers kx auth enc mac export ; do # -V doesn't work with openssl < 1.0 + hexc="$(normalize_ciphercode $hexcode)" + # for numbers we don't do word matching: + [[ $arg =~ $re ]] && \ + line="$(neat_list "$hexc" "$ciph" "$kx" "$enc" | grep -ai "$arg")" || \ + line="$(neat_list "$hexc" "$ciph" "$kx" "$enc" | grep -wai "$arg")" + [[ -n "$line" ]] && outln "$line" + done + done + fi + outln + return 0 +} + + +# Generic function for a rated output, no used yet. 
+# arg1: rating from 2 to -4 if available or not +# arg2: no/yes: decides whether positive or negative logic will be applied and "not" will be printed +# arg3: jsonID +# +rated_output() { + local jsonID=$3 + local logic="" + + if [[ $2 == no ]] || [[ $2 == negative ]]; then + logic="not " + fi + case $1 in + 2) pr_svrty_best "${logic}offered (OK)" + fileout "${jsonID}" "OK" "${logic}offered" + ;; + 1) pr_svrty_good "${logic}offered (OK)" + fileout "${jsonID}" "OK" "${logic}offered" + ;; + 0) out "${logic}offered" + fileout "${jsonID}" "INFO" "${logic}offered" + ;; + -1) pr_svrty_low "${logic}offered" + fileout "${jsonID}" "LOW" "${logic}offered" + ;; + -2) pr_svrty_medium "${logic}offered" + fileout "${jsonID}" "MEDIUM" "${logic}offered" + ;; + -3) pr_svrty_high "${logic}offered (NOT ok)" + fileout "${jsonID}" "HIGH" "${logic}offered" + ;; + -4) pr_svrty_critical "${logic}offered (NOT ok)" + fileout "${jsonID}" "CRITICAL" "${logic}offered" + ;; + *) pr_warning "FIXME: error around $LINENO, (please report this)" + fileout "${jsonID}" "WARN" "return condition $2 when $1 unclear" + return 1 + ;; + esac + return 0 +} + + +openssl2rfc() { + local rfcname="" + local -i i + + for (( i=0; i < TLS_NR_CIPHERS; i++ )); do + [[ "$1" == ${TLS_CIPHER_OSSL_NAME[i]} ]] && rfcname="${TLS_CIPHER_RFC_NAME[i]}" && break + done + [[ "$rfcname" == "-" ]] && rfcname="" + [[ -n "$rfcname" ]] && tm_out "$rfcname" + return 0 +} + +rfc2openssl() { + local ossl_name + local -i i + + for (( i=0; i < TLS_NR_CIPHERS; i++ )); do + [[ "$1" == ${TLS_CIPHER_RFC_NAME[i]} ]] && ossl_name="${TLS_CIPHER_OSSL_NAME[i]}" && break + done + [[ "$ossl_name" == "-" ]] && ossl_name="" + [[ -n "$ossl_name" ]] && tm_out "$ossl_name" + return 0 +} + +openssl2hexcode() { + local hexc="" + local -i i + + if [[ $TLS_NR_CIPHERS -eq 0 ]]; then + if "$HAS_CIPHERSUITES"; then + hexc="$($OPENSSL ciphers -V -ciphersuites "$TLS13_OSSL_CIPHERS" 'ALL:COMPLEMENTOFALL:@STRENGTH' | awk '/ '"$1"' / { print $1 }')" + else + hexc="$($OPENSSL ciphers -V 'ALL:COMPLEMENTOFALL:@STRENGTH' | awk '/ '"$1"' / { print $1 }')" + fi + else + for (( i=0; i < TLS_NR_CIPHERS; i++ )); do + [[ "$1" == ${TLS_CIPHER_OSSL_NAME[i]} ]] && hexc="${TLS_CIPHER_HEXCODE[i]}" && break + done + fi + [[ -z "$hexc" ]] && return 1 + tm_out "$hexc" + return 0 +} + +rfc2hexcode() { + local hexc="" + local -i i + + for (( i=0; i < TLS_NR_CIPHERS; i++ )); do + [[ "$1" == "${TLS_CIPHER_RFC_NAME[i]}" ]] && hexc="${TLS_CIPHER_HEXCODE[i]}" && break + done + [[ -z "$hexc" ]] && return 1 + tm_out "$hexc" + return 0 +} + +show_rfc_style(){ + local rfcname="" hexcode + local -i i + + hexcode="$(toupper "$1")" + case ${#hexcode} in + 3) hexcode="0x00,0x${hexcode:1:2}" ;; + 5) hexcode="0x${hexcode:1:2},0x${hexcode:3:2}" ;; + 7) hexcode="0x${hexcode:1:2},0x${hexcode:3:2},0x${hexcode:5:2}" ;; + *) return 1 ;; + esac + for (( i=0; i < TLS_NR_CIPHERS; i++ )); do + [[ "$hexcode" == ${TLS_CIPHER_HEXCODE[i]} ]] && rfcname="${TLS_CIPHER_RFC_NAME[i]}" && break + done + [[ "$rfcname" == "-" ]] && rfcname="" + [[ -n "$rfcname" ]] && tm_out "$rfcname" + return 0 +} + +neat_header(){ + if [[ "$DISPLAY_CIPHERNAMES" =~ rfc ]]; then + out "$(printf -- "Hexcode Cipher Suite Name (IANA/RFC) KeyExch. 
Encryption Bits")" + [[ "$DISPLAY_CIPHERNAMES" != rfc-only ]] && out "$(printf -- " Cipher Suite Name (OpenSSL)")" + outln + out "$(printf -- "%s------------------------------------------------------------------------------------------")" + [[ "$DISPLAY_CIPHERNAMES" != rfc-only ]] && out "$(printf -- "---------------------------------------")" + outln + else + out "$(printf -- "Hexcode Cipher Suite Name (OpenSSL) KeyExch. Encryption Bits")" + [[ "$DISPLAY_CIPHERNAMES" != openssl-only ]] && out "$(printf -- " Cipher Suite Name (IANA/RFC)")" + outln + out "$(printf -- "%s--------------------------------------------------------------------------")" + [[ "$DISPLAY_CIPHERNAMES" != openssl-only ]] && out "$(printf -- "---------------------------------------------------")" + outln + fi +} + + +# arg1: hexcode +# arg2: cipher in openssl notation +# arg3: keyexchange +# arg4: encryption (maybe included "export") +# arg5: "true" if the cipher's "quality" should be highlighted +# "false" if the line should be printed in light grey +# empty if line should be returned as a string +neat_list(){ + local hexcode="$1" + local ossl_cipher="$2" tls_cipher="" + local kx enc strength line what_dh bits + local -i i len + + kx="${3//Kx=/}" + enc="${4//Enc=/}" + # In two cases LibreSSL uses very long names for encryption algorithms + # and doesn't include the number of bits. + [[ "$enc" == ChaCha20-Poly1305 ]] && enc="CHACHA20(256)" + [[ "$enc" == GOST-28178-89-CNT ]] && enc="GOST(256)" + + strength="${enc//\)/}" # retrieve (). first remove traling ")" + strength="${strength#*\(}" # exfiltrate (VAL + enc="${enc%%\(*}" + + enc="${enc//POLY1305/}" # remove POLY1305 + enc="${enc//\//}" # remove "/" + + [[ "$export" =~ export ]] && strength="$strength,exp" + + [[ "$DISPLAY_CIPHERNAMES" != openssl-only ]] && tls_cipher="$(show_rfc_style "$hexcode")" + + if [[ "$5" != true ]]; then + if [[ "$DISPLAY_CIPHERNAMES" =~ rfc ]]; then + line="$(printf -- " %-7s %-49s %-10s %-12s%-8s" "$hexcode" "$tls_cipher" "$kx" "$enc" "$strength")" + [[ "$DISPLAY_CIPHERNAMES" != rfc-only ]] && line+="$(printf -- " %-33s${SHOW_EACH_C:+ %-0s}" "$ossl_cipher")" + else + line="$(printf -- " %-7s %-33s %-10s %-12s%-8s" "$hexcode" "$ossl_cipher" "$kx" "$enc" "$strength")" + [[ "$DISPLAY_CIPHERNAMES" != openssl-only ]] && line+="$(printf -- " %-49s${SHOW_EACH_C:+ %-0s}" "$tls_cipher")" + fi + if [[ -z "$5" ]]; then + tm_out "$line" + else + pr_deemphasize "$line" + fi + return 0 + fi + if [[ "$kx" =~ " " ]]; then + what_dh="${kx%% *}" + bits="${kx##* }" + else + what_dh="$kx" + bits="" + fi + if [[ "$COLOR" -le 2 ]]; then + if [[ "$DISPLAY_CIPHERNAMES" =~ rfc ]]; then + out "$(printf -- " %-7s %-49s " "$hexcode" "$tls_cipher")" + else + out "$(printf -- " %-7s %-33s " "$hexcode" "$ossl_cipher")" + fi + else + out "$(printf -- " %-7s " "$hexcode")" + if [[ "$DISPLAY_CIPHERNAMES" =~ rfc ]]; then + print_fixed_width "$tls_cipher" 49 pr_cipher_quality + else + print_fixed_width "$ossl_cipher" 33 pr_cipher_quality + fi + fi + out "$what_dh" + if [[ -n "$bits" ]]; then + if [[ $what_dh == DH ]] || [[ $what_dh == EDH ]]; then + pr_dh_quality "$bits" " $bits" + elif [[ $what_dh == ECDH ]]; then + pr_ecdh_quality "$bits" " $bits" + fi + fi + len=${#kx} + for (( i=len; i<10; i++ )); do + out " " + done + out "$(printf -- " %-12s%-8s " "$enc" "$strength")" + if [[ "$COLOR" -le 2 ]]; then + if [[ "$DISPLAY_CIPHERNAMES" == rfc ]]; then + out "$(printf -- "%-33s${SHOW_EACH_C:+ %-0s}" "$ossl_cipher")" + elif [[ "$DISPLAY_CIPHERNAMES" == openssl ]]; then + out 
"$(printf -- "%-49s${SHOW_EACH_C:+ %-0s}" "$tls_cipher")" + fi + else + if [[ "$DISPLAY_CIPHERNAMES" == rfc ]]; then + print_fixed_width "$ossl_cipher" 32 pr_cipher_quality + elif [[ "$DISPLAY_CIPHERNAMES" == openssl ]]; then + print_fixed_width "$tls_cipher" 48 pr_cipher_quality + fi + out "$(printf -- "${SHOW_EACH_C:+ %-0s}")" + fi +} + +run_cipher_match(){ + local hexc n auth export ciphers_to_test tls13_ciphers_to_test supported_sslv2_ciphers s + local -a hexcode normalized_hexcode ciph sslvers kx enc export2 sigalg + local -a ciphers_found ciphers_found2 ciph2 rfc_ciph rfc_ciph2 ossl_supported + local -a -i index + local -i nr_ciphers=0 nr_ossl_ciphers=0 nr_nonossl_ciphers=0 + local -i num_bundles mod_check bundle_size bundle end_of_bundle + local dhlen has_dh_bits="$HAS_DH_BITS" + local cipher proto protos_to_try + local available + local -i sclient_success + local re='^[0-9A-Fa-f]+$' + local using_sockets=true + + "$SSL_NATIVE" && using_sockets=false + "$FAST" && using_sockets=false + [[ $TLS_NR_CIPHERS == 0 ]] && using_sockets=false + + pr_headline " Testing ciphers with " + if [[ $1 =~ $re ]]; then + pr_headline "matching number pattern \"$1\" " + tjolines="$tjolines matching number pattern \"$1\"\n\n" + else + pr_headline "word pattern "\"$1\"" (ignore case) " + tjolines="$tjolines word pattern \"$1\" (ignore case)\n\n" + fi + outln + if ! "$using_sockets"; then + [[ $TLS_NR_CIPHERS == 0 ]] && ! "$SSL_NATIVE" && ! "$FAST" && pr_warning " Cipher mapping not available, doing a fallback to openssl" + if ! "$HAS_DH_BITS"; then + [[ $TLS_NR_CIPHERS == 0 ]] && ! "$SSL_NATIVE" && ! "$FAST" && out "." + prln_warning " (Your $OPENSSL cannot show DH/ECDH bits)" + fi + fi + outln + neat_header + #for arg in $(echo $@ | sed 's/,/ /g'); do + for arg in ${*//, /}; do + if "$using_sockets" || [[ $OSSL_VER_MAJOR -lt 1 ]]; then + for (( i=0; i < TLS_NR_CIPHERS; i++ )); do + hexc="${TLS_CIPHER_HEXCODE[i]}" + if [[ ${#hexc} -eq 9 ]]; then + hexcode[nr_ciphers]="${hexc:2:2},${hexc:7:2}" + if [[ "${hexc:2:2}" == "00" ]]; then + normalized_hexcode[nr_ciphers]="x${hexc:7:2}" + else + normalized_hexcode[nr_ciphers]="x${hexc:2:2}${hexc:7:2}" + fi + else + hexc="$(tolower "$hexc")" + hexcode[nr_ciphers]="${hexc:2:2},${hexc:7:2},${hexc:12:2}" + normalized_hexcode[nr_ciphers]="x${hexc:2:2}${hexc:7:2}${hexc:12:2}" + fi + if [[ $arg =~ $re ]]; then + neat_list "${normalized_hexcode[nr_ciphers]}" "${TLS_CIPHER_OSSL_NAME[i]}" "${TLS_CIPHER_KX[i]}" "${TLS_CIPHER_ENC[i]}" | grep -qai "$arg" + else + neat_list "${normalized_hexcode[nr_ciphers]}" "${TLS_CIPHER_OSSL_NAME[i]}" "${TLS_CIPHER_KX[i]}" "${TLS_CIPHER_ENC[i]}" | grep -qwai "$arg" + fi + if [[ $? -eq 0 ]] && ( "$using_sockets" || "${TLS_CIPHER_OSSL_SUPPORTED[i]}" ); then # string matches, so we can ssl to it: + normalized_hexcode[nr_ciphers]="$(tolower "${normalized_hexcode[nr_ciphers]}")" + ciph[nr_ciphers]="${TLS_CIPHER_OSSL_NAME[i]}" + rfc_ciph[nr_ciphers]="${TLS_CIPHER_RFC_NAME[i]}" + kx[nr_ciphers]="${TLS_CIPHER_KX[i]}" + enc[nr_ciphers]="${TLS_CIPHER_ENC[i]}" + sslvers[nr_ciphers]="${TLS_CIPHER_SSLVERS[i]}" + export2[nr_ciphers]="${TLS_CIPHER_EXPORT[i]}" + ciphers_found[nr_ciphers]=false + sigalg[nr_ciphers]="" + ossl_supported[nr_ciphers]="${TLS_CIPHER_OSSL_SUPPORTED[i]}" + if "$using_sockets" && ! 
"$has_dh_bits" && \ + ( [[ ${kx[nr_ciphers]} == "Kx=ECDH" ]] || [[ ${kx[nr_ciphers]} == "Kx=DH" ]] || [[ ${kx[nr_ciphers]} == "Kx=EDH" ]] ); then + ossl_supported[nr_ciphers]=false + fi + nr_ciphers+=1 + fi + done + else + while read hexc n ciph[nr_ciphers] sslvers[nr_ciphers] kx[nr_ciphers] auth enc[nr_ciphers] mac export2[nr_ciphers]; do + hexc="$(normalize_ciphercode $hexc)" + # is argument a number? + if [[ $arg =~ $re ]]; then + neat_list "$hexc" "${ciph[nr_ciphers]}" "${kx[nr_ciphers]}" "${enc[nr_ciphers]}" | grep -qai "$arg" + else + neat_list "$hexc" "${ciph[nr_ciphers]}" "${kx[nr_ciphers]}" "${enc[nr_ciphers]}" | grep -qwai "$arg" + fi + if [[ $? -eq 0 ]]; then # string matches, so we can ssl to it: + ciphers_found[nr_ciphers]=false + normalized_hexcode[nr_ciphers]="$hexc" + sigalg[nr_ciphers]="" + ossl_supported[nr_ciphers]=true + nr_ciphers+=1 + fi + done < <(actually_supported_osslciphers 'ALL:COMPLEMENTOFALL:@STRENGTH' 'ALL' "-V") + fi + + # Test the SSLv2 ciphers, if any. + if "$using_sockets"; then + ciphers_to_test="" + for (( i=0; i < nr_ciphers; i++ )); do + if [[ "${sslvers[i]}" == SSLv2 ]]; then + ciphers_to_test+=", ${hexcode[i]}" + fi + done + if [[ -n "$ciphers_to_test" ]]; then + sslv2_sockets "${ciphers_to_test:2}" "true" + if [[ $? -eq 3 ]] && [[ "$V2_HELLO_CIPHERSPEC_LENGTH" -ne 0 ]]; then + supported_sslv2_ciphers="$(grep "Supported cipher: " "$TEMPDIR/$NODEIP.parse_sslv2_serverhello.txt")" + "$SHOW_SIGALGO" && s="$(read_sigalg_from_file "$HOSTCERT")" + for (( i=0 ; i$TMPFILE 2>$ERRFILE = 128 ciphers. So, + # test cipher suites in bundles of 128 or less. + num_bundles=$nr_ossl_ciphers/128 + mod_check=$nr_ossl_ciphers%128 + [[ $mod_check -ne 0 ]] && num_bundles=$num_bundles+1 + + bundle_size=$nr_ossl_ciphers/$num_bundles + mod_check=$nr_ossl_ciphers%$num_bundles + [[ $mod_check -ne 0 ]] && bundle_size+=1 + fi + + if "$HAS_TLS13"; then + protos_to_try="-no_ssl2 -tls1_2 -tls1_1 -tls1" + else + protos_to_try="-no_ssl2 -tls1_1 -tls1" + fi + "$HAS_SSL3" && protos_to_try+=" -ssl3" + + for proto in $protos_to_try; do + if [[ "$proto" == -tls1_1 ]]; then + num_bundles=1 + bundle_size=$nr_ossl_ciphers + fi + for (( bundle=0; bundle < num_bundles; bundle++ )); do + end_of_bundle=$bundle*$bundle_size+$bundle_size + [[ $end_of_bundle -gt $nr_ossl_ciphers ]] && end_of_bundle=$nr_ossl_ciphers + while true; do + ciphers_to_test="" + tls13_ciphers_to_test="" + for (( i=bundle*bundle_size; i < end_of_bundle; i++ )); do + if ! "${ciphers_found2[i]}"; then + if [[ "${ciph2[i]}" == TLS13* ]] || [[ "${ciph2[i]}" == TLS_* ]]; then + tls13_ciphers_to_test+=":${ciph2[i]}" + else + ciphers_to_test+=":${ciph2[i]}" + fi + fi + done + [[ -z "$ciphers_to_test" ]] && [[ -z "$tls13_ciphers_to_test" ]] && break + $OPENSSL s_client $(s_client_options "$proto -cipher "\'${ciphers_to_test:1}\'" -ciphersuites "\'${tls13_ciphers_to_test:1}\'" $STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $SNI") >$TMPFILE 2>$ERRFILE = 128 ciphers. So, + # test cipher suites in bundles of 128 or less. 
+ num_bundles=$nr_nonossl_ciphers/128 + mod_check=$nr_nonossl_ciphers%128 + [[ $mod_check -ne 0 ]] && num_bundles=$num_bundles+1 + + bundle_size=$nr_nonossl_ciphers/$num_bundles + mod_check=$nr_nonossl_ciphers%$num_bundles + [[ $mod_check -ne 0 ]] && bundle_size+=1 + fi + + for proto in 04 03 02 01 00; do + for (( bundle=0; bundle < num_bundles; bundle++ )); do + end_of_bundle=$bundle*$bundle_size+$bundle_size + [[ $end_of_bundle -gt $nr_nonossl_ciphers ]] && end_of_bundle=$nr_nonossl_ciphers + while true; do + ciphers_to_test="" + for (( i=bundle*bundle_size; i < end_of_bundle; i++ )); do + ! "${ciphers_found2[i]}" && ciphers_to_test+=", ${hexcode2[i]}" + done + [[ -z "$ciphers_to_test" ]] && break + [[ "$proto" == 04 ]] && [[ ! "$ciphers_to_test" =~ ,\ 13,[0-9a-f][0-9a-f] ]] && break + ciphers_to_test="$(strip_inconsistent_ciphers "$proto" "$ciphers_to_test")" + [[ -z "$ciphers_to_test" ]] && break + if "$SHOW_SIGALGO"; then + tls_sockets "$proto" "${ciphers_to_test:2}, 00,ff" "all" + else + tls_sockets "$proto" "${ciphers_to_test:2}, 00,ff" "ephemeralkey" + fi + sclient_success=$? + [[ $sclient_success -ne 0 ]] && [[ $sclient_success -ne 2 ]] && break + cipher=$(get_cipher "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt") + for (( i=bundle*bundle_size; i < end_of_bundle; i++ )); do + [[ "$cipher" == "${rfc_ciph2[i]}" ]] && ciphers_found2[i]=true && break + done + [[ $i -eq $end_of_bundle ]] && break + i=${index[i]} + ciphers_found[i]=true + [[ "${kx[i]}" == "Kx=any" ]] && kx[i]="$(read_dhtype_from_file "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt")" + if [[ ${kx[i]} == "Kx=ECDH" ]] || [[ ${kx[i]} == "Kx=DH" ]] || [[ ${kx[i]} == "Kx=EDH" ]]; then + dhlen=$(read_dhbits_from_file "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" quiet) + kx[i]="${kx[i]} $dhlen" + fi + "$SHOW_SIGALGO" && [[ -r "$HOSTCERT" ]] && \ + sigalg[i]="$(read_sigalg_from_file "$HOSTCERT")" + done + done + done + + for (( i=0; i < nr_ciphers; i++ )); do + "${ciphers_found[i]}" || "$SHOW_EACH_C" || continue + export="${export2[i]}" + neat_list "${normalized_hexcode[i]}" "${ciph[i]}" "${kx[i]}" "${enc[i]}" "${ciphers_found[i]}" + available="" + if "$SHOW_EACH_C"; then + if "${ciphers_found[i]}"; then + available="available" + pr_cyan "available" + else + available="not a/v" + pr_deemphasize "not a/v" + fi + fi + outln "${sigalg[i]}" + fileout "cipher_${normalized_hexcode[i]}" "INFO" "$(neat_list "${normalized_hexcode[i]}" "${ciph[i]}" "${kx[i]}" "${enc[i]}") $available" + done + "$using_sockets" && HAS_DH_BITS="$has_dh_bits" + tmpfile_handle ${FUNCNAME[0]}.txt + done + outln + + tmpfile_handle ${FUNCNAME[0]}.txt + return 0 # this is a single test for a cipher +} + + + +# test for all ciphers locally configured (w/o distinguishing whether they are good or bad) +run_allciphers() { + local -i nr_ciphers_tested=0 nr_ciphers=0 nr_ossl_ciphers=0 nr_nonossl_ciphers=0 sclient_success=0 + local n auth mac export hexc sslv2_ciphers="" s + local -a normalized_hexcode hexcode ciph sslvers kx enc export2 sigalg ossl_supported + local -i i end_of_bundle bundle bundle_size num_bundles mod_check + local -a ciphers_found ciphers_found2 hexcode2 ciph2 rfc_ciph2 + local -i -a index + local proto protos_to_try + local dhlen available ciphers_to_test tls13_ciphers_to_test supported_sslv2_ciphers + local has_dh_bits="$HAS_DH_BITS" + local using_sockets=true + + "$SSL_NATIVE" && using_sockets=false + "$FAST" && using_sockets=false + [[ $TLS_NR_CIPHERS == 0 ]] && using_sockets=false + + # get a list of all the cipher suites to test + if "$using_sockets" 
|| [[ $OSSL_VER_MAJOR -lt 1 ]]; then + for (( i=0; i < TLS_NR_CIPHERS; i++ )); do + hexc="$(tolower "${TLS_CIPHER_HEXCODE[i]}")" + ciph[i]="${TLS_CIPHER_OSSL_NAME[i]}" + sslvers[i]="${TLS_CIPHER_SSLVERS[i]}" + kx[i]="${TLS_CIPHER_KX[i]}" + enc[i]="${TLS_CIPHER_ENC[i]}" + export2[i]="${TLS_CIPHER_EXPORT[i]}" + ciphers_found[i]=false + sigalg[i]="" + ossl_supported[i]=${TLS_CIPHER_OSSL_SUPPORTED[i]} + if "$using_sockets" && ! "$HAS_DH_BITS" && ( [[ ${kx[i]} == Kx=ECDH ]] || [[ ${kx[i]} == Kx=DH ]] || [[ ${kx[i]} == Kx=EDH ]] ); then + ossl_supported[i]=false + fi + if [[ ${#hexc} -eq 9 ]]; then + hexcode[i]="${hexc:2:2},${hexc:7:2}" + if [[ "${hexc:2:2}" == 00 ]]; then + normalized_hexcode[i]="x${hexc:7:2}" + else + normalized_hexcode[i]="x${hexc:2:2}${hexc:7:2}" + fi + else + hexcode[i]="${hexc:2:2},${hexc:7:2},${hexc:12:2}" + normalized_hexcode[i]="x${hexc:2:2}${hexc:7:2}${hexc:12:2}" + sslv2_ciphers="$sslv2_ciphers, ${hexcode[i]}" + fi + if "$using_sockets" || "${TLS_CIPHER_OSSL_SUPPORTED[i]}"; then + nr_ciphers_tested+=1 + fi + done + nr_ciphers=$TLS_NR_CIPHERS + else + while read -r hexc n ciph[nr_ciphers] sslvers[nr_ciphers] kx[nr_ciphers] auth enc[nr_ciphers] mac export2[nr_ciphers]; do + ciphers_found[nr_ciphers]=false + if [[ ${#hexc} -eq 9 ]]; then + if [[ "${hexc:2:2}" == 00 ]]; then + normalized_hexcode[nr_ciphers]="$(tolower "x${hexc:7:2}")" + else + normalized_hexcode[nr_ciphers]="$(tolower "x${hexc:2:2}${hexc:7:2}")" + fi + else + normalized_hexcode[nr_ciphers]="$(tolower "x${hexc:2:2}${hexc:7:2}${hexc:12:2}")" + fi + sigalg[nr_ciphers]="" + ossl_supported[nr_ciphers]=true + nr_ciphers=$nr_ciphers+1 + done < <(actually_supported_osslciphers 'ALL:COMPLEMENTOFALL:@STRENGTH' 'ALL' "-V") + nr_ciphers_tested=$nr_ciphers + fi + + if "$using_sockets"; then + sslv2_sockets "${sslv2_ciphers:2}" "true" + if [[ $? -eq 3 ]] && [[ "$V2_HELLO_CIPHERSPEC_LENGTH" -ne 0 ]]; then + supported_sslv2_ciphers="$(grep "Supported cipher: " "$TEMPDIR/$NODEIP.parse_sslv2_serverhello.txt")" + "$SHOW_SIGALGO" && s="$(read_sigalg_from_file "$HOSTCERT")" + for (( i=0 ; i$TMPFILE 2>$ERRFILE = 128 ciphers. So, + # test cipher suites in bundles of 128 or less. + num_bundles=$nr_ossl_ciphers/128 + mod_check=$nr_ossl_ciphers%128 + [[ $mod_check -ne 0 ]] && num_bundles=$num_bundles+1 + + bundle_size=$nr_ossl_ciphers/$num_bundles + mod_check=$nr_ossl_ciphers%$num_bundles + [[ $mod_check -ne 0 ]] && bundle_size+=1 + fi + + if "$HAS_TLS13"; then + protos_to_try="-no_ssl2 -tls1_2 -tls1_1 -tls1" + else + protos_to_try="-no_ssl2 -tls1_1 -tls1" + fi + "$HAS_SSL3" && protos_to_try+=" -ssl3" + + for proto in $protos_to_try; do + if [[ "$proto" == -tls1_1 ]]; then + num_bundles=1 + bundle_size=$nr_ossl_ciphers + fi + + [[ "$proto" != "-no_ssl2" ]] && [[ $(has_server_protocol "${proto:1}") -eq 1 ]] && continue + for (( bundle=0; bundle < num_bundles; bundle++ )); do + end_of_bundle=$bundle*$bundle_size+$bundle_size + [[ $end_of_bundle -gt $nr_ossl_ciphers ]] && end_of_bundle=$nr_ossl_ciphers + while true; do + ciphers_to_test="" + tls13_ciphers_to_test="" + for (( i=bundle*bundle_size; i < end_of_bundle; i++ )); do + if ! 
"${ciphers_found2[i]}"; then + if [[ "${ciph2[i]}" == TLS13* ]] || [[ "${ciph2[i]}" == TLS_* ]]; then + tls13_ciphers_to_test+=":${ciph2[i]}" + else + ciphers_to_test+=":${ciph2[i]}" + fi + fi + done + [[ -z "$ciphers_to_test" ]] && [[ -z "$tls13_ciphers_to_test" ]] && break + $OPENSSL s_client $(s_client_options "$proto -cipher "\'${ciphers_to_test:1}\'" -ciphersuites "\'${tls13_ciphers_to_test:1}\'" $STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $SNI") >$TMPFILE 2>$ERRFILE = 128 ciphers. So, + # test cipher suites in bundles of 128 or less. + num_bundles=$nr_nonossl_ciphers/128 + mod_check=$nr_nonossl_ciphers%128 + [[ $mod_check -ne 0 ]] && num_bundles=$num_bundles+1 + + bundle_size=$nr_nonossl_ciphers/$num_bundles + mod_check=$nr_nonossl_ciphers%$num_bundles + [[ $mod_check -ne 0 ]] && bundle_size+=1 + fi + + for proto in 04 03 02 01 00; do + for (( bundle=0; bundle < num_bundles; bundle++ )); do + end_of_bundle=$bundle*$bundle_size+$bundle_size + [[ $end_of_bundle -gt $nr_nonossl_ciphers ]] && end_of_bundle=$nr_nonossl_ciphers + while true; do + ciphers_to_test="" + for (( i=bundle*bundle_size; i < end_of_bundle; i++ )); do + ! "${ciphers_found2[i]}" && ciphers_to_test+=", ${hexcode2[i]}" + done + [[ -z "$ciphers_to_test" ]] && break + [[ "$proto" == 04 ]] && [[ ! "$ciphers_to_test" =~ ,\ 13,[0-9a-f][0-9a-f] ]] && break + ciphers_to_test="$(strip_inconsistent_ciphers "$proto" "$ciphers_to_test")" + [[ -z "$ciphers_to_test" ]] && break + if "$SHOW_SIGALGO"; then + tls_sockets "$proto" "${ciphers_to_test:2}, 00,ff" "all" + else + tls_sockets "$proto" "${ciphers_to_test:2}, 00,ff" "ephemeralkey" + fi + sclient_success=$? + [[ $sclient_success -ne 0 ]] && [[ $sclient_success -ne 2 ]] && break + cipher=$(get_cipher "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt") + for (( i=bundle*bundle_size; i < end_of_bundle; i++ )); do + [[ "$cipher" == "${rfc_ciph2[i]}" ]] && ciphers_found2[i]=true && break + done + [[ $i -eq $end_of_bundle ]] && break + i=${index[i]} + ciphers_found[i]=true + [[ "${kx[i]}" == "Kx=any" ]] && kx[i]="$(read_dhtype_from_file "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt")" + if [[ ${kx[i]} == "Kx=ECDH" ]] || [[ ${kx[i]} == "Kx=DH" ]] || [[ ${kx[i]} == "Kx=EDH" ]]; then + dhlen=$(read_dhbits_from_file "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" quiet) + kx[i]="${kx[i]} $dhlen" + fi + "$SHOW_SIGALGO" && [[ -r "$HOSTCERT" ]] && sigalg[i]="$(read_sigalg_from_file "$HOSTCERT")" + done + done + done + + for (( i=0 ; i$TMPFILE 2>$ERRFILE = 128 ciphers. So, + # test cipher suites in bundles of 128 or less. + num_bundles=$nr_ossl_ciphers/128 + mod_check=$nr_ossl_ciphers%128 + [[ $mod_check -ne 0 ]] && num_bundles=$num_bundles+1 + + bundle_size=$nr_ossl_ciphers/$num_bundles + mod_check=$nr_ossl_ciphers%$num_bundles + [[ $mod_check -ne 0 ]] && bundle_size+=1 + fi + + for (( bundle=0; bundle < num_bundles; bundle++ )); do + end_of_bundle=$bundle*$bundle_size+$bundle_size + [[ $end_of_bundle -gt $nr_ossl_ciphers ]] && end_of_bundle=$nr_ossl_ciphers + for (( success=0; success==0 ; 1 )); do + ciphers_to_test="" + tls13_ciphers_to_test="" + for (( i=bundle*bundle_size; i < end_of_bundle; i++ )); do + if ! 
"${ciphers_found2[i]}"; then + if [[ "$proto" == -tls1_3 ]]; then + tls13_ciphers_to_test+=":${ciph2[i]}" + else + ciphers_to_test+=":${ciph2[i]}" + fi + fi + done + success=1 + if [[ -n "$ciphers_to_test" ]] || [[ -n "$tls13_ciphers_to_test" ]]; then + $OPENSSL s_client $(s_client_options "-cipher "\'${ciphers_to_test:1}\'" -ciphersuites "\'${tls13_ciphers_to_test:1}\'" $proto $STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $SNI") >$TMPFILE 2>$ERRFILE = 128 ciphers. So, + # test cipher suites in bundles of 128 or less. + num_bundles=$nr_nonossl_ciphers/128 + mod_check=$nr_nonossl_ciphers%128 + [[ $mod_check -ne 0 ]] && num_bundles=$num_bundles+1 + + bundle_size=$nr_nonossl_ciphers/$num_bundles + mod_check=$nr_nonossl_ciphers%$num_bundles + [[ $mod_check -ne 0 ]] && bundle_size+=1 + fi + + for (( bundle=0; bundle < num_bundles; bundle++ )); do + end_of_bundle=$bundle*$bundle_size+$bundle_size + [[ $end_of_bundle -gt $nr_nonossl_ciphers ]] && end_of_bundle=$nr_nonossl_ciphers + for (( success=0; success==0 ; 1 )); do + ciphers_to_test="" + for (( i=bundle*bundle_size; i < end_of_bundle; i++ )); do + ! "${ciphers_found2[i]}" && ciphers_to_test+=", ${hexcode2[i]}" + done + success=1 + if [[ -n "$ciphers_to_test" ]]; then + if "$SHOW_SIGALGO"; then + tls_sockets "$proto_hex" "${ciphers_to_test:2}, 00,ff" "all" + else + tls_sockets "$proto_hex" "${ciphers_to_test:2}, 00,ff" "ephemeralkey" + fi + if [[ $? -eq 0 ]]; then + success=0 + cipher=$(get_cipher "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt") + for (( i=bundle*bundle_size; i < end_of_bundle; i++ )); do + [[ "$cipher" == "${rfc_ciph2[i]}" ]] && ciphers_found2[i]=true && break + done + i=${index[i]} + ciphers_found[i]=true + [[ "$proto_text" == TLS\ 1.3 ]] && kx[i]="$(read_dhtype_from_file "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt")" + if [[ ${kx[i]} == Kx=ECDH ]] || [[ ${kx[i]} == Kx=DH ]] || [[ ${kx[i]} == Kx=EDH ]]; then + dhlen=$(read_dhbits_from_file "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" quiet) + kx[i]="${kx[i]} $dhlen" + fi + "$SHOW_SIGALGO" && [[ -r "$HOSTCERT" ]] && \ + sigalg[i]="$(read_sigalg_from_file "$HOSTCERT")" + fi + fi + done + done + fi + + for (( i=0 ; i> "$SOCK_REPLY_FILE" + rm "$sock_reply_file3" + fi + + check_tls_serverhellodone "$tls_hello_ascii" "ephemeralkey" + hello_done=$? + fi + done + + debugme echo "reading server hello..." + if [[ "$DEBUG" -ge 4 ]]; then + hexdump -C $SOCK_REPLY_FILE | head -6 + echo + fi + if [[ "${tls_hello_ascii:0:1}" == 8 ]]; then + parse_sslv2_serverhello "$SOCK_REPLY_FILE" "false" + if [[ $? -eq 3 ]] && [[ "$V2_HELLO_CIPHERSPEC_LENGTH" -ne 0 ]]; then + echo "Protocol : SSLv2" > "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" + DETECTED_TLS_VERSION="0200" + ret=0 + else + ret=1 + fi + else + parse_tls_serverhello "$tls_hello_ascii" "ephemeralkey" "$cipher_list_2send" + save=$? + + if [[ $save -eq 0 ]]; then + send_close_notify "$DETECTED_TLS_VERSION" + fi + + if [[ $DEBUG -ge 2 ]]; then + # see https://secure.wand.net.nz/trac/libprotoident/wiki/SSL + lines=$(count_lines "$(hexdump -C "$SOCK_REPLY_FILE" 2>$ERRFILE)") + tm_out " ($lines lines returned) " + fi + + # determine the return value for higher level, so that they can tell what the result is + if [[ $save -eq 1 ]] || [[ $lines -eq 1 ]]; then + ret=1 # NOT available + else + ret=0 + fi + debugme tmln_out + fi + + close_socket + TMPFILE=$SOCK_REPLY_FILE + tmpfile_handle ${FUNCNAME[0]}.dd + return $ret +} + +run_client_simulation() { + # Runs browser simulations. 
Browser capabilities gathered from: + # https://www.ssllabs.com/ssltest/clients.html on 10 jan 2016 + local names=() + local short=() + local protos=() + local ciphers=() + local ciphersuites=() + local tlsvers=() + local sni=() + local warning=() + local handshakebytes=() + local lowest_protocol=() + local highest_protocol=() + local service=() + local minDhBits=() + local maxDhBits=() + local minRsaBits=() + local maxRsaBits=() + local minEcdsaBits=() + local curves=() + local requiresSha2=() + local current=() + local i=0 + local name tls proto cipher temp what_dh bits curve supported_curves + local has_dh_bits using_sockets=true + local client_service + local options + local -i ret=0 + local jsonID="clientsimulation" + local client_service="" + + # source the external file + . "$TESTSSL_INSTALL_DIR/etc/testssl/client-simulation.txt" 2>/dev/null + if [[ $? -ne 0 ]]; then + prln_local_problem "couldn't find client simulation data in $TESTSSL_INSTALL_DIR/etc/testssl/client-simulation.txt" + return 1 + fi + + "$SSL_NATIVE" && using_sockets=false + + if [[ $SERVICE != "" ]]; then + client_service="$SERVICE" + elif [[ -n "$STARTTLS_PROTOCOL" ]]; then + # Can we take the service from STARTTLS? + client_service=$(toupper "${STARTTLS_PROTOCOL%s}") # strip trailing 's' in ftp(s), smtp(s), pop3(s), etc + elif "$ASSUME_HTTP"; then + client_service="HTTP" + else + outln "Could not determine the protocol, only simulating generic clients." + fi + + outln + pr_headline " Running client simulations " + [[ "$client_service" == HTTP ]] && pr_headline "($client_service) " + if "$using_sockets"; then + pr_headlineln "via sockets " + else + pr_headline "via openssl " + prln_warning " -- pls note \"--ssl-native\" will return some false results" + fileout "$jsonID" "WARN" "You shouldn't run this with \"--ssl-native\" as you will get false results" + ret=1 + fi + outln + debugme echo + + if "$WIDE"; then + if [[ "$DISPLAY_CIPHERNAMES" =~ openssl ]]; then + out " Browser Protocol Cipher Suite Name (OpenSSL) " + ( "$using_sockets" || "$HAS_DH_BITS") && out "Forward Secrecy" + outln + out "--------------------------------------------------------------------------" + else + out " Browser Protocol Cipher Suite Name (IANA/RFC) " + ( "$using_sockets" || "$HAS_DH_BITS") && out "Forward Secrecy" + outln + out "------------------------------------------------------------------------------------------" + fi + ( "$using_sockets" || "$HAS_DH_BITS") && out "----------------------" + outln + fi + if ! "$using_sockets"; then + # We can't use the connectivity checker here as of now the openssl reply is always empty (reason??) + save_max_ossl_fail=$MAX_OSSL_FAIL + nr_ossl_fail=$NR_OSSL_FAIL + MAX_OSSL_FAIL=100 + fi + for name in "${short[@]}"; do + if "${current[i]}" || "$ALL_CLIENTS" ; then + # for ANY we test this service or if the service we determined from STARTTLS matches + if [[ "${service[i]}" == ANY ]] || [[ "${service[i]}" =~ $client_service ]]; then + out " $(printf -- "%-29s" "${names[i]}")" + if "$using_sockets" && [[ -n "${handshakebytes[i]}" ]]; then + client_simulation_sockets "${handshakebytes[i]}" + sclient_success=$? 
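+# The check that follows rejects a handshake whose negotiated version falls
+# outside the simulated client's [lowest_protocol, highest_protocol] range.
+# Hedged example with made-up values: DETECTED_TLS_VERSION=0303 (TLS 1.2)
+# against a client whose highest_protocol is 0x0302 (TLS 1.1):
+#   [[ "0x0303" -gt 0x0302 ]] && echo "outside client range"   # -> rejected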
+ if [[ $sclient_success -eq 0 ]]; then + if [[ "0x${DETECTED_TLS_VERSION}" -lt ${lowest_protocol[i]} ]] || \ + [[ "0x${DETECTED_TLS_VERSION}" -gt ${highest_protocol[i]} ]]; then + sclient_success=1 + fi + [[ $sclient_success -eq 0 ]] && cp "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" $TMPFILE >$ERRFILE + fi + else + if [[ -n "${curves[i]}" ]]; then + # "$OPENSSL s_client" will fail if the -curves option includes any unsupported curves. + supported_curves="" + for curve in $(colon_to_spaces "${curves[i]}"); do + # Attention! secp256r1 = prime256v1 and secp192r1 = prime192v1 + # We need to map two curves here as otherwise handshakes will go wrong if "-curves" are supplied + # https://github.com/openssl/openssl/blob/master/apps/ecparam.c#L221 + ./ssl/t1_lib.c + [[ "$curve" =~ secp256r1 ]] && curve="${curve//secp256r1/prime256v1}" + [[ "$curve" =~ secp192r1 ]] && curve="${curve//secp192r1/prime192v1}" + [[ "$OSSL_SUPPORTED_CURVES" =~ " $curve " ]] && supported_curves+=":$curve" + done + curves[i]="" + [[ -n "$supported_curves" ]] && curves[i]="-curves ${supported_curves:1}" + fi + options="$(s_client_options "-cipher ${ciphers[i]} -ciphersuites "\'${ciphersuites[i]}\'" ${curves[i]} ${protos[i]} $STARTTLS $BUGS $PROXY -connect $NODEIP:$PORT ${sni[i]}")" + debugme echo "$OPENSSL s_client $options $TMPFILE 2>$ERRFILE + sclient_connect_successful $? $TMPFILE + sclient_success=$? + fi + if [[ $sclient_success -eq 0 ]]; then + # If an ephemeral DH key was used, check that the number of bits is within range. + temp=$(awk -F': ' '/^Server Temp Key/ { print $2 }' "$TMPFILE") # extract line + what_dh="${temp%%,*}" + bits="${temp##*, }" + # formatting + curve="${temp#*, }" + if [[ "$curve" == $bits ]]; then + curve="" + else + curve="${curve%%,*}" + fi + bits="${bits/bits/}" + bits="${bits// /}" + if [[ "$what_dh" == X25519 ]] || [[ "$what_dh" == X448 ]]; then + curve="$what_dh" + what_dh="ECDH" + fi + if [[ "$what_dh" == DH ]]; then + [[ ${minDhBits[i]} -ne -1 ]] && [[ $bits -lt ${minDhBits[i]} ]] && sclient_success=1 + [[ ${maxDhBits[i]} -ne -1 ]] && [[ $bits -gt ${maxDhBits[i]} ]] && sclient_success=1 + fi + fi + if [[ $sclient_success -ne 0 ]]; then + outln "No connection" + fileout "${jsonID}-${short[i]}" "INFO" "No connection" + else + proto=$(get_protocol $TMPFILE) + # hack: + [[ "$proto" == TLSv1 ]] && proto="TLSv1.0" + [[ "$proto" == SSLv3 ]] && proto="SSLv3 " + if [[ "$proto" == TLSv1.2 ]] && ( ! "$using_sockets" || [[ -z "${handshakebytes[i]}" ]] ); then + # OpenSSL reports TLS1.2 even if the connection is TLS1.1 or TLS1.0. Need to figure out which one it is... + for tls in ${tlsvers[i]}; do + # If the handshake data includes TLS 1.3 we need to remove it, otherwise the + # simulation will fail with # 'Oops: openssl s_client connect problem' + # before/after trying another protocol. We only print a warning it in debug mode + # as otherwise we would need e.g. handle the curves in a similar fashion -- not + # to speak about ciphers + if [[ $tls =~ 1_3 ]] && ! "$HAS_TLS13"; then + debugme pr_local_problem "TLS 1.3 not supported, " + continue + fi + options="$(s_client_options "$tls -cipher ${ciphers[i]} -ciphersuites "\'${ciphersuites[i]}\'" ${curves[i]} $STARTTLS $BUGS $PROXY -connect $NODEIP:$PORT ${sni[i]}")" + debugme echo "$OPENSSL s_client $options $TMPFILE 2>$ERRFILE + sclient_connect_successful $? $TMPFILE + sclient_success=$? 
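+# The case statement below pins down the real protocol: openssl may report
+# "TLSv1.2" even when the connection was TLS 1.1 or 1.0, so each candidate
+# flag from tlsvers[i] is retried and the first one that still connects names
+# the protocol.  Stand-alone sketch of the same idea (host/port are
+# placeholders, shown for illustration only and not executed here):
+#   for flag in -tls1_2 -tls1_1 -tls1; do
+#       openssl s_client $flag -connect example.com:443 </dev/null 2>/dev/null \
+#           >/dev/null && { echo "connects with $flag"; break; }
+#   done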
+ if [[ $sclient_success -eq 0 ]]; then + case "$tls" in + "-tls1_2") break ;; + "-tls1_1") proto="TLSv1.1" + break ;; + "-tls1") proto="TLSv1.0" + break ;; + esac + fi + done + fi + cipher=$(get_cipher $TMPFILE) + if [[ "$DISPLAY_CIPHERNAMES" =~ openssl ]] && ( [[ "$cipher" == TLS_* ]] || [[ "$cipher" == SSL_* ]] ); then + cipher="$(rfc2openssl "$cipher")" + [[ -z "$cipher" ]] && cipher=$(get_cipher $TMPFILE) + elif [[ "$DISPLAY_CIPHERNAMES" =~ rfc ]] && [[ "$cipher" != TLS_* ]] && [[ "$cipher" != SSL_* ]]; then + cipher="$(openssl2rfc "$cipher")" + [[ -z "$cipher" ]] && cipher=$(get_cipher $TMPFILE) + fi + out "$proto " + "$WIDE" && out " " + if [[ "$COLOR" -le 2 ]]; then + out "$cipher" + else + pr_cipher_quality "$cipher" + fi + if "$WIDE"; then + if [[ "$DISPLAY_CIPHERNAMES" =~ openssl ]]; then + for (( j=${#cipher}; j < 34; j++ )); do + out " " + done + else + for (( j=${#cipher}; j < 50; j++ )); do + out " " + done + fi + fi + if [[ -n "$what_dh" ]]; then + [[ -n "$curve" ]] && curve="($curve)" + "$WIDE" || out ", " + if [[ "$what_dh" == ECDH ]]; then + pr_ecdh_quality "$bits" "$(printf -- "%-12s" "$bits bit $what_dh") $curve" + else + pr_dh_quality "$bits" "$(printf -- "%-12s" "$bits bit $what_dh") $curve" + fi + else + if "$HAS_DH_BITS" || ( "$using_sockets" && [[ -n "${handshakebytes[i]}" ]] ); then + "$WIDE" || out ", " + out "No FS" + fi + fi + outln + if [[ -n "${warning[i]}" ]]; then + out " " + outln "${warning[i]}" + fi + fileout "${jsonID}-${short[i]}" "INFO" "$proto $cipher ${warning[i]}" + debugme cat $TMPFILE + fi + fi # correct service? + fi #current? + ((i++)) + done + if ! "$using_sockets"; then + # restore from above + MAX_OSSL_FAIL=$save_max_ossl_fail + NR_OSSL_FAIL=$nr_ossl_fail + fi + + tmpfile_handle ${FUNCNAME[0]}.txt + return $ret +} + +# generic function whether $1 is supported by s_client ($2: string to display, currently nowhere being used) +# +locally_supported() { + [[ -n "$2" ]] && out "$2 " + if $OPENSSL s_client "$1" -connect invalid. 2>&1 | grep -aiq "unknown option"; then + prln_local_problem "$OPENSSL doesn't support \"s_client $1\"" + return 7 + fi + return 0 +} + + +# The protocol check in run_protocols needs to be redone. The using_sockets part there kind of sucks. +# 1) we need to have a variable where the results are being stored so that every other test doesn't have to do this again +# --> we have that but certain information like "downgraded" are not being passed. That's not ok for run_protocols()/ +# for all other functions we can use it +# 2) the code is old and one can do that way better +# We should do what's available and faster (openssl vs. sockets). Keep in mind that the socket reply for SSLv2 returns the number # of ciphers! +# +# arg1: -ssl2|-ssl3|-tls1|-tls1_1|-tls1_2|-tls1_3 +# +run_prototest_openssl() { + local -i ret=0 + local protos proto + + # check whether the protocol being tested is supported by $OPENSSL + $OPENSSL s_client "$1" -connect invalid. 
2>&1 | grep -aiq "unknown option" && return 7 + case "$1" in + -ssl2) protos="-ssl2" ;; + -ssl3) protos="-ssl3" ;; + -tls1) protos="-no_tls1_2 -no_tls1_1 -no_ssl2"; "$HAS_TLS13" && protos+=" -no_tls1_3" ;; + -tls1_1) protos="-no_tls1_2 -no_ssl2"; "$HAS_TLS13" && protos+=" -no_tls1_3" ;; + -tls1_2) protos="-no_ssl2"; "$HAS_TLS13" && protos+=" -no_tls1_3" ;; + -tls1_3) protos="" ;; + esac + $OPENSSL s_client $(s_client_options "-state $protos $STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $SNI") >$TMPFILE 2>&1 $TMPFILE 2>&1 =2)" + ;; + 1) # no sslv2 server hello returned, like in openlitespeed which returns HTTP! + prln_svrty_best "not offered (OK)" + fileout "$jsonID" "OK" "not offered" + add_tls_offered ssl2 no + ;; + 0) # reset + prln_svrty_best "not offered (OK)" + fileout "$jsonID" "OK" "not offered" + add_tls_offered ssl2 no + ;; + 4) out "likely "; pr_svrty_best "not offered (OK), " + fileout "$jsonID" "OK" "likely not offered" + add_tls_offered ssl2 no + pr_warning "received 4xx/5xx after STARTTLS handshake"; outln "$debug_recomm" + fileout "$jsonID" "WARN" "received 4xx/5xx after STARTTLS handshake${debug_recomm}" + ;; + 3) lines=$(count_lines "$(hexdump -C "$TEMPDIR/$NODEIP.sslv2_sockets.dd" 2>/dev/null)") + [[ "$DEBUG" -ge 2 ]] && tm_out " ($lines lines) " + if [[ "$lines" -gt 1 ]]; then + nr_ciphers_detected=$((V2_HELLO_CIPHERSPEC_LENGTH / 3)) + add_tls_offered ssl2 yes + if [[ 0 -eq "$nr_ciphers_detected" ]]; then + prln_svrty_high "supported but couldn't detect a cipher and vulnerable to CVE-2015-3197 "; + fileout "$jsonID" "HIGH" "offered, no cipher" "CVE-2015-3197" "CWE-310" + else + pr_svrty_critical "offered (NOT ok), also VULNERABLE to DROWN attack"; + outln " -- $nr_ciphers_detected ciphers" + fileout "$jsonID" "CRITICAL" "vulnerable with $nr_ciphers_detected ciphers" + fi + fi + ;; + *) pr_fixme "unexpected value around line $((LINENO))"; outln "$debug_recomm" + ((ret++)) + ;; + esac + debugme tmln_out + else + run_prototest_openssl "-ssl2" + case $? in + 0) prln_svrty_critical "offered (NOT ok)" + fileout "$jsonID" "CRITICAL" "offered" + add_tls_offered ssl2 yes + ;; + 1) prln_svrty_best "not offered (OK)" + fileout "$jsonID" "OK" "not offered" + add_tls_offered ssl2 no + ;; + 5) prln_svrty_high "CVE-2015-3197: $supported_no_ciph2"; + fileout "$jsonID" "HIGH" "offered, no cipher" "CVE-2015-3197" "CWE-310" + add_tls_offered ssl2 yes + ;; + 7) prln_local_problem "$OPENSSL doesn't support \"s_client -ssl2\"" + fileout "$jsonID" "INFO" "not tested due to lack of local support" + ((ret++)) + ;; + esac + fi + + pr_bold " SSLv3 "; + jsonID="SSLv3" + if [[ $(has_server_protocol ssl3) -eq 0 ]]; then + ret_val_ssl3=0 + elif "$using_sockets"; then + tls_sockets "00" "$TLS_CIPHER" + ret_val_ssl3=$? + else + run_prototest_openssl "-ssl3" + ret_val_ssl3=$? 
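+# Return-code legend for the case blocks in this section (as returned by
+# tls_sockets / run_prototest_openssl): 0 = offered, 1 = not offered,
+# 2 = server answered with a different protocol version, 3 = downgraded to
+# STARTTLS plaintext, 4 = 4xx/5xx received after the STARTTLS handshake,
+# 5 = protocol detected but no cipher returned, 7 = local openssl lacks
+# support / strange reply.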
+ fi + case $ret_val_ssl3 in + 0) prln_svrty_high "offered (NOT ok)" + fileout "$jsonID" "HIGH" "offered" + if "$using_sockets" || "$HAS_SSL3"; then + latest_supported="0300" + latest_supported_string="SSLv3" + fi + add_tls_offered ssl3 yes + ;; + 1) prln_svrty_best "not offered (OK)" + fileout "$jsonID" "OK" "not offered" + add_tls_offered ssl3 no + ;; + 2) if [[ "$DETECTED_TLS_VERSION" == 03* ]]; then + detected_version_string="TLSv1.$((0x$DETECTED_TLS_VERSION-0x0301))" + prln_svrty_critical "server responded with higher version number ($detected_version_string) than requested by client (NOT ok)" + fileout "$jsonID" "CRITICAL" "server responded with higher version number ($detected_version_string) than requested by client" + else + if [[ ${#DETECTED_TLS_VERSION} -eq 4 ]]; then + prln_svrty_critical "server responded with version number ${DETECTED_TLS_VERSION:0:2}.${DETECTED_TLS_VERSION:2:2} (NOT ok)" + fileout "$jsonID" "CRITICAL" "server responded with version number ${DETECTED_TLS_VERSION:0:2}.${DETECTED_TLS_VERSION:2:2}" + else + prln_svrty_medium "strange, server ${DETECTED_TLS_VERSION}" + fileout "$jsonID" "MEDIUM" "strange, server ${DETECTED_TLS_VERSION}" + ((ret++)) + fi + fi + ;; + 3) pr_svrty_best "not offered (OK), " + fileout "$jsonID" "OK" "not offered" + add_tls_offered ssl3 no + pr_warning "SSL downgraded to STARTTLS plaintext"; outln + fileout "$jsonID" "WARN" "SSL downgraded to STARTTLS plaintext" + ;; + 4) out "likely "; pr_svrty_best "not offered (OK), " + fileout "$jsonID" "OK" "not offered" + add_tls_offered ssl3 no + pr_warning "received 4xx/5xx after STARTTLS handshake"; outln "$debug_recomm" + fileout "$jsonID" "WARN" "received 4xx/5xx after STARTTLS handshake${debug_recomm}" + ;; + 5) pr_svrty_high "$supported_no_ciph1" # protocol detected but no cipher --> comes from run_prototest_openssl + fileout "$jsonID" "HIGH" "$supported_no_ciph1" + add_tls_offered ssl3 yes + ;; + 7) if "$using_sockets" ; then + # can only happen in debug mode + pr_warning "strange reply, maybe a client side problem with SSLv3"; outln "$debug_recomm" + else + prln_local_problem "$OPENSSL doesn't support \"s_client -ssl3\"" + fileout "$jsonID" "WARN" "not tested due to lack of local support" + fi + ;; + *) pr_fixme "unexpected value around line $((LINENO))"; outln "$debug_recomm" + ((ret++)) + ;; + esac + + pr_bold " TLS 1 "; + jsonID="TLS1" + if [[ $(has_server_protocol tls1) -eq 0 ]]; then + ret_val_tls1=0 + elif "$using_sockets"; then + tls_sockets "01" "$TLS_CIPHER" + ret_val_tls1=$? + else + run_prototest_openssl "-tls1" + ret_val_tls1=$? 
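+# Each protocol block first consults the cached result via has_server_protocol,
+# then probes with sockets or falls back to openssl.  Minimal stand-alone
+# sketch of such an openssl probe (hedged; host/port and the grep pattern are
+# illustrative, not taken from this script):
+#   openssl s_client -tls1 -connect example.com:443 </dev/null 2>/dev/null \
+#       | grep -q 'Protocol.*: TLSv1$' && echo "TLS 1.0 offered"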
+ fi + case $ret_val_tls1 in + 0) pr_svrty_low "offered" ; outln " (deprecated)" + fileout "$jsonID" "LOW" "offered (deprecated)" + latest_supported="0301" + latest_supported_string="TLSv1.0" + add_tls_offered tls1 yes + ;; # nothing wrong with it -- per se + 1) out "not offered" + add_tls_offered tls1 no + if [[ -z $latest_supported ]]; then + outln + fileout "$jsonID" "INFO" "not offered" # neither good or bad + else + prln_svrty_critical " -- connection failed rather than downgrading to $latest_supported_string (NOT ok)" + fileout "$jsonID" "CRITICAL" "connection failed rather than downgrading to $latest_supported_string" + fi + ;; + 2) pr_svrty_medium "not offered" + add_tls_offered tls1 no + if [[ "$DETECTED_TLS_VERSION" == 0300 ]]; then + [[ $DEBUG -ge 1 ]] && tm_out " -- downgraded" + outln + fileout "$jsonID" "MEDIUM" "not offered, and downgraded to SSL" + elif [[ "$DETECTED_TLS_VERSION" == 03* ]]; then + detected_version_string="TLSv1.$((0x$DETECTED_TLS_VERSION-0x0301))" + prln_svrty_critical " -- server responded with higher version number ($detected_version_string) than requested by client" + fileout "$jsonID" "CRITICAL" "server responded with higher version number ($detected_version_string) than requested by client" + else + if [[ ${#DETECTED_TLS_VERSION} -eq 4 ]]; then + prln_svrty_critical "server responded with version number ${DETECTED_TLS_VERSION:0:2}.${DETECTED_TLS_VERSION:2:2} (NOT ok)" + fileout "$jsonID" "CRITICAL" "server responded with version number ${DETECTED_TLS_VERSION:0:2}.${DETECTED_TLS_VERSION:2:2}" + else + prln_svrty_medium " -- strange, server ${DETECTED_TLS_VERSION}" + fileout "$jsonID" "MEDIUM" "strange, server ${DETECTED_TLS_VERSION}" + fi + fi + ;; + 3) out "not offered, " + fileout "$jsonID" "OK" "not offered" + add_tls_offered tls1 no + pr_warning "TLS downgraded to STARTTLS plaintext"; outln + fileout "$jsonID" "WARN" "TLS downgraded to STARTTLS plaintext" + ;; + 4) out "likely not offered, " + fileout "$jsonID" "INFO" "likely not offered" + add_tls_offered tls1 no + pr_warning "received 4xx/5xx after STARTTLS handshake"; outln "$debug_recomm" + fileout "$jsonID" "WARN" "received 4xx/5xx after STARTTLS handshake${debug_recomm}" + ;; + 5) outln "$supported_no_ciph1" # protocol detected but no cipher --> comes from run_prototest_openssl + fileout "$jsonID" "INFO" "$supported_no_ciph1" + add_tls_offered tls1 yes + ;; + 7) if "$using_sockets" ; then + # can only happen in debug mode + pr_warning "strange reply, maybe a client side problem with TLS 1.0"; outln "$debug_recomm" + else + prln_local_problem "$OPENSSL doesn't support \"s_client -tls1\"" + fileout "$jsonID" "WARN" "not tested due to lack of local support" + fi + ((ret++)) + ;; + *) pr_fixme "unexpected value around line $((LINENO))"; outln "$debug_recomm" + ((ret++)) + ;; + esac + + pr_bold " TLS 1.1 "; + jsonID="TLS1_1" + if [[ $(has_server_protocol tls1_1) -eq 0 ]]; then + ret_val_tls11=0 + elif "$using_sockets"; then + tls_sockets "02" "$TLS_CIPHER" + ret_val_tls11=$? + else + run_prototest_openssl "-tls1_1" + ret_val_tls11=$? 
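+# Bookkeeping shared by these protocol checks: every offered protocol records
+# latest_supported (0301 = TLSv1.0, 0302 = TLSv1.1, 0303 = TLSv1.2,
+# 0304 = TLSv1.3) plus latest_supported_string, so that a later "not offered"
+# is rated CRITICAL when the server failed the handshake instead of
+# downgrading to that still-supported version.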
+ fi + case $ret_val_tls11 in + 0) pr_svrty_low "offered" ; outln " (deprecated)" + fileout "$jsonID" "LOW" "offered (deprecated)" + latest_supported="0302" + latest_supported_string="TLSv1.1" + add_tls_offered tls1_1 yes + ;; # nothing wrong with it + 1) out "not offered" + add_tls_offered tls1_1 no + if [[ -z $latest_supported ]]; then + outln + fileout "$jsonID" "INFO" "is not offered" # neither good or bad + else + prln_svrty_critical " -- connection failed rather than downgrading to $latest_supported_string" + fileout "$jsonID" "CRITICAL" "connection failed rather than downgrading to $latest_supported_string" + fi + ;; + 2) out "not offered" + add_tls_offered tls1_1 no + if [[ "$DETECTED_TLS_VERSION" == "$latest_supported" ]]; then + [[ $DEBUG -ge 1 ]] && tm_out " -- downgraded" + outln + fileout "$jsonID" "CRITICAL" "TLSv1.1 is not offered, and downgraded to a weaker protocol" + elif [[ "$DETECTED_TLS_VERSION" == 0300 ]] && [[ "$latest_supported" == 0301 ]]; then + prln_svrty_critical " -- server supports TLSv1.0, but downgraded to SSLv3 (NOT ok)" + fileout "$jsonID" "CRITICAL" "not offered, and downgraded to SSLv3 rather than TLSv1.0" + elif [[ "$DETECTED_TLS_VERSION" == 03* ]] && [[ 0x$DETECTED_TLS_VERSION -gt 0x0302 ]]; then + detected_version_string="TLSv1.$((0x$DETECTED_TLS_VERSION-0x0301))" + prln_svrty_critical " -- server responded with higher version number ($detected_version_string) than requested by client (NOT ok)" + fileout "$jsonID" "CRITICAL" "not offered, server responded with higher version number ($detected_version_string) than requested by client" + else + if [[ ${#DETECTED_TLS_VERSION} -eq 4 ]]; then + prln_svrty_critical "server responded with version number ${DETECTED_TLS_VERSION:0:2}.${DETECTED_TLS_VERSION:2:2} (NOT ok)" + fileout "$jsonID" "CRITICAL" "server responded with version number ${DETECTED_TLS_VERSION:0:2}.${DETECTED_TLS_VERSION:2:2}" + else + prln_svrty_medium " -- strange, server ${DETECTED_TLS_VERSION}" + fileout "$jsonID" "MEDIUM" "strange, server ${DETECTED_TLS_VERSION}" + fi + fi + ;; + 3) out "not offered, " + fileout "$jsonID" "OK" "not offered" + add_tls_offered tls1_1 no + pr_warning "TLS downgraded to STARTTLS plaintext"; outln + fileout "$jsonID" "WARN" "TLS downgraded to STARTTLS plaintext" + ;; + 4) out "likely not offered, " + fileout "$jsonID" "INFO" "is not offered" + add_tls_offered tls1_1 no + pr_warning "received 4xx/5xx after STARTTLS handshake"; outln "$debug_recomm" + fileout "$jsonID" "WARN" "received 4xx/5xx after STARTTLS handshake${debug_recomm}" + ;; + 5) outln "$supported_no_ciph1" # protocol detected but no cipher --> comes from run_prototest_openssl + fileout "$jsonID" "INFO" "$supported_no_ciph1" + add_tls_offered tls1_1 yes + ;; + 7) if "$using_sockets" ; then + # can only happen in debug mode + pr_warning "strange reply, maybe a client side problem with TLS 1.1"; outln "$debug_recomm" + else + prln_local_problem "$OPENSSL doesn't support \"s_client -tls1_1\"" + fileout "$jsonID" "WARN" "not tested due to lack of local support" + fi + ((ret++)) + ;; + *) pr_fixme "unexpected value around line $((LINENO))"; outln "$debug_recomm" + ((ret++)) + ;; + esac + + # Now, we are doing a basic/pre test for TLS 1.2 and 1.3 in order not to penalize servers (medium) + # running TLS 1.3 only when TLS 1.2 is not offered. 
0 and 5 are the return codes for + # TLS 1.3 support (kind of, including deprecated pre-versions of TLS 1.3) + if [[ $(has_server_protocol tls1_2) -eq 0 ]]; then + ret_val_tls12=0 + elif "$using_sockets"; then + tls_sockets "03" "$TLS12_CIPHER" + ret_val_tls12=$? + tls12_detected_version="$DETECTED_TLS_VERSION" + else + run_prototest_openssl "-tls1_2" + ret_val_tls12=$? + tls12_detected_version="$DETECTED_TLS_VERSION" + fi + + if [[ $(has_server_protocol tls1_3) -eq 0 ]]; then + ret_val_tls13=0 + elif "$using_sockets"; then + # Need to ensure that at most 128 ciphers are included in ClientHello. + # If the TLSv1.2 test in determine_optimal_sockets_params() was successful, + # then use the 5 TLSv1.3 ciphers plus the cipher selected in the TLSv1.2 test. + # If the TLSv1.2 test was not successful, then just use the 5 TLSv1.3 ciphers + # plus the list of ciphers used in all of the previous tests ($TLS_CIPHER). + if [[ -n "$TLS12_CIPHER_OFFERED" ]]; then + tls13_ciphers_to_test="$TLS13_CIPHER, $TLS12_CIPHER_OFFERED, 00,ff" + else + tls13_ciphers_to_test="$TLS13_CIPHER,$TLS_CIPHER" + fi + tls_sockets "04" "$tls13_ciphers_to_test" + ret_val_tls13=$? + else + run_prototest_openssl "-tls1_3" + ret_val_tls13=$? + fi + if [[ $ret_val_tls13 -eq 0 ]] || [[ $ret_val_tls13 -eq 5 ]]; then + offers_tls13=true # This variable comes in handy for further if statements below + fi + # Done with pretesting TLS 1.2 and 1.3. + + pr_bold " TLS 1.2 "; + jsonID="TLS1_2" + case $ret_val_tls12 in + 0) prln_svrty_best "offered (OK)" + fileout "$jsonID" "OK" "offered" + latest_supported="0303" + latest_supported_string="TLSv1.2" + add_tls_offered tls1_2 yes + ;; # GCM cipher in TLS 1.2: very good! + 1) add_tls_offered tls1_2 no + if "$offers_tls13"; then + out "not offered" + else + pr_svrty_medium "not offered" + fi + if [[ -z $latest_supported ]]; then + outln + if "$offers_tls13"; then + fileout "$jsonID" "INFO" "not offered" + else + fileout "$jsonID" "MEDIUM" "not offered" # TLS 1.3, no TLS 1.2 --> no GCM, penalty + fi + else + prln_svrty_critical " -- connection failed rather than downgrading to $latest_supported_string" + fileout "$jsonID" "CRITICAL" "connection failed rather than downgrading to $latest_supported_string" + fi + ;; + 2) add_tls_offered tls1_2 no + pr_svrty_medium "not offered and downgraded to a weaker protocol" + if [[ "$tls12_detected_version" == 0300 ]]; then + detected_version_string="SSLv3" + elif [[ "$tls12_detected_version" == 03* ]]; then + detected_version_string="TLSv1.$((0x$tls12_detected_version-0x0301))" + fi + if [[ "$tls12_detected_version" == "$latest_supported" ]]; then + outln + fileout "$jsonID" "MEDIUM" "not offered and downgraded to a weaker protocol" + elif [[ "$tls12_detected_version" == 03* ]] && [[ 0x$tls12_detected_version -lt 0x$latest_supported ]]; then + prln_svrty_critical " -- server supports $latest_supported_string, but downgraded to $detected_version_string" + fileout "$jsonID" "CRITICAL" "not offered, and downgraded to $detected_version_string rather than $latest_supported_string" + elif [[ "$tls12_detected_version" == 03* ]] && [[ 0x$tls12_detected_version -gt 0x0303 ]]; then + prln_svrty_critical " -- server responded with higher version number ($detected_version_string) than requested by client" + fileout "$jsonID" "CRITICAL" "not offered, server responded with higher version number ($detected_version_string) than requested by client" + else + if [[ ${#tls12_detected_version} -eq 4 ]]; then + prln_svrty_critical "server responded with version number 
${tls12_detected_version:0:2}.${tls12_detected_version:2:2} (NOT ok)" + fileout "$jsonID" "CRITICAL" "server responded with version number ${tls12_detected_version:0:2}.${tls12_detected_version:2:2}" + else + prln_svrty_medium " -- strange, server ${tls12_detected_version}" + fileout "$jsonID" "MEDIUM" "strange, server ${tls12_detected_version}" + fi + fi + ;; + 3) out "not offered, " + fileout "$jsonID" "INFO" "not offered" + add_tls_offered tls1_2 no + pr_warning "TLS downgraded to STARTTLS plaintext"; outln + fileout "$jsonID" "WARN" "TLS downgraded to STARTTLS plaintext" + ;; + 4) out "likely "; pr_svrty_medium "not offered, " + fileout "$jsonID" "MEDIUM" "not offered" + add_tls_offered tls1_2 no + pr_warning "received 4xx/5xx after STARTTLS handshake"; outln "$debug_recomm" + fileout "$jsonID" "WARN" "received 4xx/5xx after STARTTLS handshake${debug_recomm}" + ;; + 5) outln "$supported_no_ciph1" # protocol detected, but no cipher --> comes from run_prototest_openssl + fileout "$jsonID" "INFO" "$supported_no_ciph1" + add_tls_offered tls1_2 yes + ;; + 7) if "$using_sockets" ; then + # can only happen in debug mode + pr_warning "strange reply, maybe a client side problem with TLS 1.2"; outln "$debug_recomm" + else + prln_local_problem "$OPENSSL doesn't support \"s_client -tls1_2\"" + fileout "$jsonID" "WARN" "not tested due to lack of local support" + fi + ((ret++)) + ;; + *) pr_fixme "unexpected value around line $((LINENO))"; outln "$debug_recomm" + ((ret++)) + ;; + esac + + pr_bold " TLS 1.3 "; + jsonID="TLS1_3" + case $ret_val_tls13 in + 0) if ! "$using_sockets"; then + prln_svrty_best "offered (OK)" + fileout "$jsonID" "OK" "offered" + else + # If TLS 1.3 is offered, then its support was detected + # by determine_optimal_sockets_params(). + if [[ $(has_server_protocol tls1_3_rfc8446) -eq 0 ]]; then + drafts_offered+=" 0304 " + else + for i in 1C 1B 1A 19 18 17 16 15 14 13 12; do + if [[ $(has_server_protocol tls1_3_draft$(hex2dec "$i")) -eq 0 ]]; then + drafts_offered+=" 7F$i " + break + fi + done + fi + KEY_SHARE_EXTN_NR="28" + while true; do + supported_versions="" + for i in 16 15 14 13 12; do + [[ "$drafts_offered" =~ \ 7F$i\ ]] || supported_versions+=",7f,$i" + done + [[ -z "$supported_versions" ]] && break + supported_versions="00, 2b, 00, $(printf "%02x" $((${#supported_versions}/3+1))), $(printf "%02x" $((${#supported_versions}/3))) $supported_versions" + tls_sockets "04" "$TLS13_CIPHER" "" "$supported_versions" + [[ $? -eq 0 ]] || break + if [[ "${TLS_SERVER_HELLO:8:3}" == 7F1 ]]; then + drafts_offered+=" ${TLS_SERVER_HELLO:8:4} " + elif [[ "$TLS_SERVER_HELLO" =~ 002B00027F1[2-6] ]]; then + drafts_offered+=" ${BASH_REMATCH:8:4} " + fi + done + KEY_SHARE_EXTN_NR="33" + while true; do + supported_versions="" + for i in 1C 1B 1A 19 18 17; do + [[ "$drafts_offered" =~ \ 7F$i\ ]] || supported_versions+=",7f,$i" + done + [[ "$drafts_offered" =~ \ 0304\ ]] || supported_versions+=",03,04" + [[ -z "$supported_versions" ]] && break + supported_versions="00, 2b, 00, $(printf "%02x" $((${#supported_versions}/3+1))), $(printf "%02x" $((${#supported_versions}/3))) $supported_versions" + tls_sockets "04" "$TLS13_CIPHER" "" "$supported_versions" + [[ $? 
-eq 0 ]] || break + if [[ "$TLS_SERVER_HELLO" =~ 002B00020304 ]]; then + drafts_offered+=" 0304 " + elif [[ "$TLS_SERVER_HELLO" =~ 002B00027F1[7-9A-C] ]]; then + drafts_offered+=" ${BASH_REMATCH:8:4} " + fi + done + KEY_SHARE_EXTN_NR="$key_share_extn_nr" + if [[ -n "$drafts_offered" ]]; then + for i in 1C 1B 1A 19 18 17 16 15 14 13 12; do + if [[ "$drafts_offered" =~ \ 7F$i\ ]]; then + [[ -n "$drafts_offered_str" ]] && drafts_offered_str+=", " + drafts_offered_str+="draft $(printf "%d" 0x$i)" + fi + done + if [[ "$drafts_offered" =~ \ 0304\ ]]; then + [[ -n "$drafts_offered_str" ]] && drafts_offered_str+=", " + drafts_offered_str+="final" + fi + if [[ "$drafts_offered" =~ \ 0304\ ]]; then + pr_svrty_best "offered (OK)"; outln ": $drafts_offered_str" + fileout "$jsonID" "OK" "offered with $drafts_offered_str" + else + out "offered (OK)"; outln ": $drafts_offered_str" + fileout "$jsonID" "INFO" "offered with $drafts_offered_str" + fi + else + pr_warning "Unexpected results"; outln "$debug_recomm" + fileout "$jsonID" "WARN" "unexpected results" + fi + fi + latest_supported="0304" + latest_supported_string="TLSv1.3" + add_tls_offered tls1_3 yes + ;; + 1) pr_svrty_low "not offered" + if [[ -z $latest_supported ]]; then + outln + fileout "$jsonID" "LOW" "not offered" + else + prln_svrty_critical " -- connection failed rather than downgrading to $latest_supported_string" + fileout "$jsonID" "CRITICAL" "connection failed rather than downgrading to $latest_supported_string" + fi + add_tls_offered tls1_3 no + ;; + 2) if [[ "$DETECTED_TLS_VERSION" == 0300 ]]; then + detected_version_string="SSLv3" + elif [[ "$DETECTED_TLS_VERSION" == 03* ]]; then + detected_version_string="TLSv1.$((0x$DETECTED_TLS_VERSION-0x0301))" + fi + if [[ "$DETECTED_TLS_VERSION" == "$latest_supported" ]]; then + outln "not offered and downgraded to a weaker protocol" + fileout "$jsonID" "INFO" "not offered + downgraded to weaker protocol" + elif [[ "$DETECTED_TLS_VERSION" == 03* ]] && [[ 0x$DETECTED_TLS_VERSION -lt 0x$latest_supported ]]; then + out "not offered" + prln_svrty_critical " -- server supports $latest_supported_string, but downgraded to $detected_version_string" + fileout "$jsonID" "CRITICAL" "not offered, and downgraded to $detected_version_string rather than $latest_supported_string" + elif [[ "$DETECTED_TLS_VERSION" == 03* ]] && [[ 0x$DETECTED_TLS_VERSION -gt 0x0304 ]]; then + out "not offered" + prln_svrty_critical " -- server responded with higher version number ($detected_version_string) than requested by client" + fileout "$jsonID" "CRITICAL" "not offered, server responded with higher version number ($detected_version_string) than requested by client" + else + out "not offered" + prln_svrty_critical " -- server responded with version number ${DETECTED_TLS_VERSION:0:2}.${DETECTED_TLS_VERSION:2:2}" + fileout "$jsonID" "CRITICAL" "server responded with version number ${DETECTED_TLS_VERSION:0:2}.${DETECTED_TLS_VERSION:2:2}" + fi + add_tls_offered tls1_3 no + ;; + 3) out "not offered " + fileout "$jsonID" "INFO" "not offered" + add_tls_offered tls1_3 no + pr_warning "TLS downgraded to STARTTLS plaintext"; outln + fileout "$jsonID" "WARN" "TLS downgraded to STARTTLS plaintext" + ;; + 4) out "likely not offered, " + fileout "$jsonID" "INFO" "not offered" + add_tls_offered tls1_3 no + pr_warning "received 4xx/5xx after STARTTLS handshake"; outln "$debug_recomm" + fileout "$jsonID" "WARN" "received 4xx/5xx after STARTTLS handshake${debug_recomm}" + ;; + 5) outln "$supported_no_ciph1" # protocol detected but no cipher 
--> comes from run_prototest_openssl + fileout "$jsonID" "INFO" "$supported_no_ciph1" + add_tls_offered tls1_3 yes + ;; + 7) if "$using_sockets" ; then + # can only happen in debug mode + prln_warning "strange reply, maybe a client side problem with TLS 1.3"; outln "$debug_recomm" + else + prln_local_problem "$OPENSSL doesn't support \"s_client -tls1_3\"" + fileout "$jsonID" "WARN" "not tested due to lack of local support" + fi + ((ret++)) + ;; + *) pr_fixme "unexpected value around line $((LINENO))"; outln "$debug_recomm" + ((ret++)) + ;; + esac + + debugme echo "PROTOS_OFFERED: $PROTOS_OFFERED" + if [[ ! "$PROTOS_OFFERED" =~ yes ]]; then + outln + ignore_no_or_lame "You should not proceed as no protocol was detected. If you still really really want to, say \"YES\"" "YES" + [[ $? -ne 0 ]] && exit $ERR_CLUELESS + fi + + return $ret +} + + +# list ciphers (and makes sure you have them locally configured) +# arg[1]: non-TLSv1.3 cipher list (or anything else) +# arg[2]: TLSv1.3 cipher list +# arg[3]: protocol (e.g., -ssl2) +# +listciphers() { + local -i ret + local debugname="" + local tls13_ciphers="$TLS13_OSSL_CIPHERS" + + [[ "$2" != ALL ]] && tls13_ciphers="$2" + if "$HAS_CIPHERSUITES"; then + $OPENSSL ciphers $OSSL_CIPHERS_S $3 -ciphersuites "$tls13_ciphers" "$1" &>$TMPFILE + elif [[ -n "$tls13_ciphers" ]]; then + $OPENSSL ciphers $OSSL_CIPHERS_S $3 "$tls13_ciphers:$1" &>$TMPFILE + else + $OPENSSL ciphers $OSSL_CIPHERS_S $3 "$1" &>$TMPFILE + fi + ret=$? + debugme cat $TMPFILE + debugname="$(sed -e s'/\!/not/g' -e 's/\:/_/g' <<< "$1")" + tmpfile_handle ${FUNCNAME[0]}.${debugname}.txt + return $ret +} + + +# argv[1]: non-TLSv1.3 cipher list to test in OpenSSL syntax +# argv[2]: TLSv1.3 cipher list to test in OpenSSL syntax +# argv[3]: string on console / HTML or "finding" +# argv[4]: rating whether ok to offer +# argv[5]: string to be appended for fileout +# argv[6]: non-SSLv2 cipher list to test (hexcodes), if using sockets +# argv[7]: SSLv2 cipher list to test (hexcodes), if using sockets +# argv[8]: true if using sockets, false if not +# argv[9]: CVE +# argv[10]: CWE +# +sub_cipherlists() { + local -i i len sclient_success=1 + local cipherlist sslv2_cipherlist detected_ssl2_ciphers + local singlespaces + local proto="" + local -i ret=0 + local jsonID="cipherlist" + local using_sockets="${8}" + local cve="${9}" + local cwe="${10}" + + pr_bold "$3 " + [[ "$OPTIMAL_PROTO" == -ssl2 ]] && proto="$OPTIMAL_PROTO" + jsonID="${jsonID}_$5" + + if "$using_sockets" || listciphers "$1" "$2" $proto; then + if ! "$using_sockets" || ( "$FAST" && listciphers "$1" "$2" -tls1 ); then + for proto in -no_ssl2 -tls1_2 -tls1_1 -tls1 -ssl3; do + if [[ "$proto" == -tls1_2 ]]; then + # If $OPENSSL doesn't support TLSv1.3 or if no TLSv1.3 + # ciphers are being tested, then a TLSv1.2 ClientHello + # was tested in the first iteration. + ! "$HAS_TLS13" && continue + [[ -z "$2" ]] && continue + fi + ! "$HAS_SSL3" && [[ "$proto" == -ssl3 ]] && continue + if [[ "$proto" != -no_ssl2 ]]; then + "$FAST" && continue + [[ $(has_server_protocol "${proto:1}") -eq 1 ]] && continue + fi + $OPENSSL s_client $(s_client_options "-cipher "$1" -ciphersuites "\'$2\'" $BUGS $STARTTLS -connect $NODEIP:$PORT $PROXY $SNI $proto") 2>$ERRFILE >$TMPFILE $ERRFILE >$TMPFILE atm ok, as sockets are preferred. If there would be a single function for testing: yes. 
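+# Example of how the categories below are wired to sub_cipherlists (mirrors
+# the NULL-cipher call further down; the hex lists only matter when testing
+# via sockets):
+#   sub_cipherlists 'NULL:eNULL' "" " NULL ciphers (no encryption) " 1 "NULL" \
+#       "$null_ciphers" "$sslv2_null_ciphers" "$using_sockets" "$cve" "$cwe"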
+run_cipherlists() { + local hexc hexcode strength + local -i i + local -i ret=0 + local ossl_null_ciphers null_ciphers sslv2_null_ciphers + local ossl_anon_ciphers anon_ciphers sslv2_anon_ciphers + local ossl_exp_ciphers exp_ciphers sslv2_exp_ciphers + local ossl_low_ciphers low_ciphers sslv2_low_ciphers + local ossl_tdes_ciphers tdes_ciphers sslv2_tdes_cipher + local ossl_average_ciphers average_ciphers + local strong_ciphers + local cwe="CWE-327" + local cwe2="CWE-310" + local cve="" + local using_sockets=true + + outln + pr_headlineln " Testing cipher categories " + outln + "$SSL_NATIVE" && using_sockets=false + + # conversion 2 byte ciphers via: echo "$@" | sed -e 's/[[:xdigit:]]\{2\},/0x&/g' -e 's/, /\n/g' | while read ci; do grep -wi $ci etc/cipher-mapping.txt; done + + ossl_null_ciphers='NULL:eNULL' + null_ciphers="c0,10, c0,06, c0,15, c0,0b, c0,01, c0,3b, c0,3a, c0,39, 00,b9, 00,b8, 00,b5, 00,b4, 00,2e, 00,2d, 00,b1, 00,b0, 00,2c, 00,3b, 00,02, 00,01, 00,82, 00,83, ff,87, 00,ff" + sslv2_null_ciphers="FF,80,10, 00,00,00" + + ossl_anon_ciphers='aNULL:ADH' + anon_ciphers="c0,19, 00,a7, 00,6d, 00,3a, 00,c5, 00,89, c0,47, c0,5b, c0,85, c0,18, 00,a6, 00,6c, 00,34, 00,bf, 00,9b, 00,46, c0,46, c0,5a, c0,84, c0,16, 00,18, c0,17, 00,1b, 00,1a, 00,19, 00,17, c0,15, 00,ff" + sslv2_anon_ciphers="FF,80,10" + + ossl_exp_ciphers='EXPORT:!ADH:!NULL' + # grep -i EXP etc/cipher-mapping.txt + exp_ciphers="00,63, 00,62, 00,61, 00,65, 00,64, 00,60, 00,14, 00,11, 00,19, 00,08, 00,06, 00,27, 00,26, 00,2a, 00,29, 00,0b, 00,0e, 00,17, 00,03, 00,28, 00,2b, 00,ff" + sslv2_exp_ciphers="04,00,80, 02,00,80, 00,00,00" + + ossl_low_ciphers='LOW:DES:RC2:RC4:!ADH:!EXP:!NULL:!eNULL' + # egrep -w '64|56|RC2|RC4' etc/cipher-mapping.txt | egrep -v 'Au=None|export' + low_ciphers="00,04, 00,05, 00,09, 00,0C, 00,0F, 00,12, 00,15, 00,1E, 00,20, 00,22, 00,24, 00,66, 00,8A, 00,8E, 00,92, C0,02, C0,07, C0,0C, C0,11, C0,33, FE,FE, FF,E1, 00,FF" + sslv2_low_ciphers="01,00,80, 03,00,80, 06,00,40, 06,01,40, 08,00,80, FF,80,00" + + ossl_tdes_ciphers='3DES:IDEA:!aNULL:!ADH' + # egrep -w '3DES|IDEA' etc/cipher-mapping.txt | grep -v "Au=None" + tdes_ciphers="00,07, 00,0A, 00,0D, 00,10, 00,13, 00,16, 00,1F, 00,21, 00,23, 00,25, 00,8B, 00,8F, 00,93, C0,03, C0,08, C0,0D, C0,12, C0,1A, C0,1B, C0,1C, C0,34, FE,FF, FF,E0, 00,FF" + sslv2_tdes_ciphers="05,00,80, 07,00,c0, 07,01,c0" + + # Now all AES, CAMELLIA, ARIA and SEED CBC ciphers plus GOST + ossl_average_ciphers='HIGH:MEDIUM:AES:CAMELLIA:ARIA:!IDEA:!CHACHA20:!3DES:!RC2:!RC4:!AESCCM8:!AESCCM:!AESGCM:!ARIAGCM:!aNULL' + # egrep -w "256|128" etc/cipher-mapping.txt | egrep -v "Au=None|AEAD|RC2|RC4|IDEA" + average_ciphers="00,2F, 00,30, 00,31, 00,32, 00,33, 00,35, 00,36, 00,37, 00,38, 00,39, 00,3C, 00,3D, 00,3E, 00,3F, 00,40, 00,41, 00,42, 00,43, 00,44, 00,45, 00,67, 00,68, 00,69, 00,6A, 00,6B, 00,84, 00,85, 00,86, 00,87, 00,88, 00,8C, 00,8D, 00,90, 00,91, 00,94, 00,95, 00,96, 00,97, 00,98, 00,99, 00,9A, 00,AE, 00,AF, 00,B2, 00,B3, 00,B6, 00,B7, 00,BA, 00,BB, 00,BC, 00,BD, 00,BE, 00,C0, 00,C1, 00,C2, 00,C3, 00,C4, C0,04, C0,05, C0,09, C0,0A, C0,0E, C0,0F, C0,13, C0,14, C0,1D, C0,1E, C0,1F, C0,20, C0,21, C0,22, C0,23, C0,24, C0,25, C0,26, C0,27, C0,28, C0,29, C0,2A, C0,35, C0,36, C0,37, C0,38, C0,3C, C0,3D, C0,3E, C0,3F, C0,40, C0,41, C0,42, C0,43, C0,44, C0,45, C0,48, C0,49, C0,4A, C0,4B, C0,4C, C0,4D, C0,4E, C0,4F, C0,64, C0,65, C0,66, C0,67, C0,68, C0,69, C0,70, C0,71, C0,72, C0,73, C0,74, C0,75, C0,76, C0,77, C0,78, C0,79, C0,94, C0,95, C0,96, C0,97, C0,98, C0,99, C0,9A, C0,9B" + # 
Workaround: If we use sockets and in order not to hit 132+1 ciphers we omit the GOST ciphers if SERVER_SIZE_LIMIT_BUG is true. + # This won't be supported by Cisco ACE anyway. Catch is, if SERVER_SIZE_LIMIT_BUG was not tested for before (only this function is being called) + "$SERVER_SIZE_LIMIT_BUG" || average_ciphers="${average_ciphers}, 00,80, 00,81, FF,00, FF,01, FF,02, FF,03, FF,85" + average_ciphers="${average_ciphers}, 00,FF" + + # Here's the strongest discrepancy between sockets and OpenSSL + ossl_strong_ciphers='AESGCM:CHACHA20:AESGCM:CamelliaGCM:AESCCM:ARIAGCM' + # grep AEAD etc/cipher-mapping.txt | grep -v Au=None + strong_ciphers="00,9C, 00,9D, 00,9E, 00,9F, 00,A0, 00,A1, 00,A2, 00,A3, 00,A4, 00,A5, 00,A8, 00,A9, 00,AA, 00,AB, 00,AC, 00,AD, 13,01, 13,02, 13,03, 13,04, 13,05, 16,B7, 16,B8, 16,B9, 16,BA, C0,2B, C0,2C, C0,2D, C0,2E, C0,2F, C0,30, C0,31, C0,32, C0,50, C0,51, C0,52, C0,53, C0,54, C0,55, C0,56, C0,57, C0,58, C0,59, C0,5C, C0,5D, C0,5E, C0,5F, C0,60, C0,61, C0,62, C0,63, C0,6A, C0,6B, C0,6C, C0,6D, C0,6E, C0,6F, C0,7A, C0,7B, C0,7C, C0,7D, C0,7E, C0,7F, C0,80, C0,81, C0,82, C0,83, C0,86, C0,87, C0,88, C0,89, C0,8A, C0,8B, C0,8C, C0,8D, C0,8E, C0,8F, C0,90, C0,91, C0,92, C0,93, C0,9C, C0,9D, C0,9E, C0,9F, C0,A0, C0,A1, C0,A2, C0,A3, C0,A4, C0,A5, C0,A6, C0,A7, C0,A8, C0,A9, C0,AA, C0,AB, C0,AC, C0,AD, C0,AE, C0,AF, CC,13, CC,14, CC,15, CC,A8, CC,A9, CC,AA, CC,AB, CC,AC, CC,AD, CC,AE, 00,FF" + + # argv[1]: non-TLSv1.3 cipher list to test in OpenSSL syntax + # argv[2]: TLSv1.3 cipher list to test in OpenSSL syntax + # argv[3]: string on console / HTML or "finding" + # argv[4]: rating whether ok to offer + # argv[5]: string to be appended for fileout + # argv[6]: non-SSLv2 cipher list to test (hexcodes), if using sockets + # argv[7]: SSLv2 cipher list to test (hexcodes), if using sockets + # argv[8]: true if using sockets, false if not + # argv[9]: CVE + # argv[10]: CWE + + sub_cipherlists "$ossl_null_ciphers" "" " NULL ciphers (no encryption) " 1 "NULL" "$null_ciphers" "$sslv2_null_ciphers" "$using_sockets" "$cve" "$cwe" + ret=$? + sub_cipherlists "$ossl_anon_ciphers" "" " Anonymous NULL Ciphers (no authentication)" 1 "aNULL" "$anon_ciphers" "$sslv2_anon_ciphers" "$using_sockets" "$cve" "$cwe" + ret=$((ret + $?)) + sub_cipherlists "$ossl_exp_ciphers" "" " Export ciphers (w/o ADH+NULL) " 1 "EXPORT" "$exp_ciphers" "$sslv2_exp_ciphers" "$using_sockets" "$cve" "$cwe" + ret=$((ret + $?)) + sub_cipherlists "$ossl_low_ciphers" "" " LOW: 64 Bit + DES, RC[2,4] (w/o export) " 2 "LOW" "$low_ciphers" "$sslv2_low_ciphers" "$using_sockets" "$cve" "$cwe" + ret=$((ret + $?)) + sub_cipherlists "$ossl_tdes_ciphers" "" " Triple DES Ciphers / IDEA " 3 "3DES_IDEA" "$tdes_ciphers" "$sslv2_tdes_ciphers" "$using_sockets" "$cve" "$cwe2" + ret=$((ret + $?)) + sub_cipherlists "$ossl_average_ciphers" "" " Obsolete CBC ciphers (AES, ARIA etc.) 
" 4 "AVERAGE" "$average_ciphers" "" "$using_sockets" "$cve" "$cwe2" + ret=$((ret + $?)) + sub_cipherlists "$ossl_strong_ciphers" 'ALL' " Strong encryption (AEAD ciphers) " 7 "STRONG" "$strong_ciphers" "" "$using_sockets" "" "" + ret=$((ret + $?)) + + outln + return $ret +} + +# The return value is an indicator of the quality of the DH key length in $1: +# 1 = pr_svrty_critical, 2 = pr_svrty_high, 3 = pr_svrty_medium, 4 = pr_svrty_low +# 5 = neither good nor bad, 6 = pr_svrty_good, 7 = pr_svrty_best +pr_dh_quality() { + local bits="$1" + local string="$2" + + if [[ "$bits" -le 600 ]]; then + pr_svrty_critical "$string" + return 1 + elif [[ "$bits" -le 800 ]]; then + pr_svrty_high "$string" + return 2 + elif [[ "$bits" -le 1280 ]]; then + pr_svrty_medium "$string" + return 3 + elif [[ "$bits" -ge 2048 ]]; then + pr_svrty_good "$string" + return 6 + else + out "$string" + return 5 + fi +} + +# prints out dh group=prime and in round brackets DH bits and labels it accordingly +# arg1: name of dh group, arg2=bit length +pr_dh() { + local -i quality=0 + + pr_italic "$1" + out " (" + pr_dh_quality "$2" "$2 bits" + quality=$? + out ")" + return $quality +} + +pr_ecdh_quality() { + local bits="$1" + local string="$2" + + if [[ "$bits" -le 80 ]]; then # has that ever existed? + pr_svrty_critical "$string" + elif [[ "$bits" -le 108 ]]; then # has that ever existed? + pr_svrty_high "$string" + elif [[ "$bits" -le 163 ]]; then + pr_svrty_medium "$string" + elif [[ "$bits" -le 193 ]]; then # hmm, according to https://wiki.openssl.org/index.php/Elliptic_Curve_Cryptography it should ok + pr_svrty_low "$string" # but openssl removed it https://github.com/drwetter/testssl.sh/issues/299#issuecomment-220905416 + elif [[ "$bits" -le 224 ]]; then + out "$string" + elif [[ "$bits" -gt 224 ]]; then + pr_svrty_good "$string" + else + out "$string" + fi +} + +pr_ecdh_curve_quality() { + curve="$1" + local -i bits=0 + + case "$curve" in + "sect163k1") bits=163 ;; + "sect163r1") bits=162 ;; + "sect163r2") bits=163 ;; + "sect193r1") bits=193 ;; + "sect193r2") bits=193 ;; + "sect233k1") bits=232 ;; + "sect233r1") bits=233 ;; + "sect239k1") bits=238 ;; + "sect283k1") bits=281 ;; + "sect283r1") bits=282 ;; + "sect409k1") bits=407 ;; + "sect409r1") bits=409 ;; + "sect571k1") bits=570 ;; + "sect571r1") bits=570 ;; + "secp160k1") bits=161 ;; + "secp160r1") bits=161 ;; + "secp160r2") bits=161 ;; + "secp192k1") bits=192 ;; + "prime192v1") bits=192 ;; + "secp224k1") bits=225 ;; + "secp224r1") bits=224 ;; + "secp256k1") bits=256 ;; + "prime256v1") bits=256 ;; + "secp384r1") bits=384 ;; + "secp521r1") bits=521 ;; + "brainpoolP256r1") bits=256 ;; + "brainpoolP384r1") bits=384 ;; + "brainpoolP512r1") bits=512 ;; + "X25519") bits=253 ;; + "X448") bits=448 ;; + esac + pr_ecdh_quality "$bits" "$curve" +} + +# Print $2 based on the quality of the cipher in $1. If $2 is empty, just print $1. +# The return value is an indicator of the quality of the cipher in $1: +# 0 = $1 is empty +# 1 = pr_svrty_critical, 2 = pr_svrty_high, 3 = pr_svrty_medium, 4 = pr_svrty_low +# 5 = neither good nor bad, 6 = pr_svrty_good, 7 = pr_svrty_best +# +# Please note this section isn't particular spot on. It needs to be reconsidered/redone +# SHA1, SSLv3 ciphers are some points which need to be considered. 
+# Hint: find out by "grep etc/cipher-mapping.txt" but it' might be be easier +# to look out Enc= and Au= or Mac= +# +pr_cipher_quality() { + local cipher="$1" + local text="$2" + + [[ -z "$1" ]] && return 0 + [[ -z "$text" ]] && text="$cipher" + + if [[ "$cipher" != TLS_* ]] && [[ "$cipher" != SSL_* ]]; then + # This must be the OpenSSL name for a cipher or for TLS 1.3 ($TLS13_OSSL_CIPHERS) + # We can ignore them however as the OpenSSL and RFC names currently match + if [[ $TLS_NR_CIPHERS -eq 0 ]]; then + # We have an OpenSSL name and can't convert it to the RFC name which is rarely + # the case, see "prepare_arrays()" and "./etc/cipher-mapping.txt" + case "$cipher" in + *NULL*|EXP*|ADH*) + pr_svrty_critical "$text" + return 1 + ;; + *RC4*|*RC2*|*MD5|*M1) + pr_svrty_high "$text" + return 2 + ;; + AES256-GCM-SHA384|AES128-GCM-SHA256|AES256-CCM|AES128-CCM|ARIA256-GCM-SHA384|ARIA128-GCM-SHA256) + # RSA kx and e.g. GCM isn't certainly the best + pr_svrty_good "$text" + return 6 + ;; + *GCM*|*CCM*|*CHACHA20*) + pr_svrty_best "$text" + return 7 + ;; #best ones + *CBC3*|*SEED*|*3DES*|*IDEA*) + pr_svrty_medium "$text" + return 3 + ;; + ECDHE*AES*|DHE*AES*SHA*|*CAMELLIA*SHA) + pr_svrty_low "$text" + return 4 + ;; + *) + out "$text" + return 5 + ;; + esac + fi + cipher="$(openssl2rfc "$cipher")" + fi + + # Now we look at the RFC cipher names. The sequence matters - as above. + case "$cipher" in + *NULL*|*EXP*|*_DES40_*|*anon*) + pr_svrty_critical "$text" + return 1 + ;; + *RC4*|*RC2*|*MD5|*MD5_1) + pr_svrty_high "$text" + return 2 + ;; + *_DES_*) + if [[ "$cipher" =~ EDE3 ]]; then + pr_svrty_medium "$text" # 3DES + return 3 + fi + pr_svrty_high "$text" + return 2 + ;; + *CBC3*|*SEED*|*3DES*|*IDEA*) + pr_svrty_medium "$text" + return 3 + ;; + TLS_RSA_*) + if [[ "$cipher" =~ CBC ]]; then + pr_svrty_low "$text" + return 4 + else + pr_svrty_good "$text" + # RSA kx and e.g. 
GCM isn't certainly the best + return 6 + fi + ;; + *GCM*|*CCM*|*CHACHA20*) + pr_svrty_best "$text" + return 7 + ;; + *ECDHE*AES*CBC*|*DHE*AES*SHA*|*RSA*AES*SHA*|*CAMELLIA*SHA*) + pr_svrty_low "$text" + return 4 + ;; + *) + out "$text" + return 5 + ;; + esac +} + +# arg1: file with input for grepping the type of ephemeral DH key (DH ECDH) +read_dhtype_from_file() { + local temp kx + + temp=$(awk -F': ' '/^Server Temp Key/ { print $2 }' "$1") # extract line + kx="Kx=${temp%%,*}" + [[ "$kx" == "Kx=X25519" ]] && kx="Kx=ECDH" + [[ "$kx" == "Kx=X448" ]] && kx="Kx=ECDH" + tm_out "$kx" + return 0 +} + +# arg1: certificate file +read_sigalg_from_file() { + $OPENSSL x509 -noout -text -in "$1" 2>/dev/null | awk -F':' '/Signature Algorithm/ { print $2; exit; }' +} + + +# arg1: file with input for grepping the bit length for ECDH/DHE +# arg2: whether to print warning "old fart" or not (empty: no) +read_dhbits_from_file() { + local bits what_dh temp curve="" + local add="" + local old_fart=" (your $OPENSSL cannot show DH bits)" + + temp=$(awk -F': ' '/^Server Temp Key/ { print $2 }' "$1") # extract line + what_dh="${temp%%,*}" + bits="${temp##*, }" + curve="${temp#*, }" + if [[ "$curve" == "$bits" ]]; then + curve="" + else + curve="${curve%%,*}" + fi + bits="${bits/bits/}" + bits="${bits// /}" + + if [[ "$what_dh" == X25519 ]] || [[ "$what_dh" == X448 ]]; then + curve="$what_dh" + what_dh="ECDH" + fi + if [[ -z "$2" ]]; then + if [[ -n "$curve" ]]; then + debugme echo ">$HAS_DH_BITS|$what_dh($curve)|$bits<" + else + debugme echo ">$HAS_DH_BITS|$what_dh|$bits<" + fi + fi + [[ -n "$what_dh" ]] && HAS_DH_BITS=true # FIX 190 + if [[ -z "$what_dh" ]] && ! "$HAS_DH_BITS"; then + if [[ "$2" == "string" ]]; then + tm_out "$old_fart" + elif [[ -z "$2" ]]; then + pr_warning "$old_fart" + fi + return 0 + fi + if [[ "$2" == quiet ]]; then + tm_out "$bits" + return 0 + fi + [[ -z "$2" ]] && [[ -n "$bits" ]] && out ", " + if [[ $what_dh == DH ]] || [[ $what_dh == EDH ]]; then + add="bit DH" + [[ -n "$curve" ]] && add+=" ($curve)" + if [[ "$2" == string ]]; then + tm_out ", $bits $add" + else + pr_dh_quality "$bits" "$bits $add" + fi + # https://wiki.openssl.org/index.php/Elliptic_Curve_Cryptography, https://www.keylength.com/en/compare/ + elif [[ $what_dh == ECDH ]]; then + add="bit ECDH" + [[ -n "$curve" ]] && add+=" ($curve)" + if [[ "$2" == string ]]; then + tm_out ", $bits $add" + else + pr_ecdh_quality "$bits" "$bits $add" + fi + fi + return 0 +} + + +# arg1: ID or empty. If empty resumption by ticket will be tested, otherwise by ID +# return: 0: it has resumption, 1:nope, 2: nope (OpenSSL 1.1.1), 6: CLIENT_AUTH --> problem for resumption, 7: can't tell +# +# This is basically a short(?) version from Bulletproof SSL and TLS (p386). The version according to that would be e.g. +# echo | $OPENSSL s_client -connect testssl.sh:443 -servername testssl.sh -no_ssl2 -reconnect 2>&1 | grep -E 'New|Reused' +# echo | $OPENSSL s_client -connect testssl.sh:443 -servername testssl.sh -no_ssl2 -no_ticket -reconnect 2>&1 | grep -E 'New|Reused|Session-ID' +# +# FIXME: actually Ivan's version seems faster. 
Worth to check and since when -reconnect is a/v +# +sub_session_resumption() { + local ret ret1 ret2 + local tmpfile=$(mktemp $TEMPDIR/session_resumption.$NODEIP.XXXXXX) + local sess_data=$(mktemp $TEMPDIR/sub_session_data_resumption.$NODEIP.XXXXXX) + local -a rw_line + local not_new_reused=false + local protocol="$1" + + if [[ "$2" == ID ]]; then + local byID=true + local addcmd="-no_ticket" + else + local byID=false + local addcmd="" + if ! "$TLS_TICKETS"; then + return 1 + fi + fi + "$CLIENT_AUTH" && return 6 + if "$HAS_NO_SSL2"; then + addcmd+=" -no_ssl2" + else + protocol=${protocol/\./_} + protocol=${protocol/v/} + protocol="-$(tolower $protocol)" + # In some cases a server will not support session tickets, but will support session resumption + # by ID. In such a case, it may be more likely to support session resumption with TLSv1.2 than + # with TLSv1.3. So, if testing a server that does not support session tickets and that supports + # both TLSv1.3 and TLSv1.2 for session resumption by ID, then use a TLSv1.2 ClientHello. (Note that + # the line below assumes that if $protocol is -tls1_3, then the server either supports TLSv1.2 or + # is TLSv1.3-only. + ! "$TLS_TICKETS" && "$byID" && [[ $(has_server_protocol "tls1_2") -eq 0 ]] && protocol="-tls1_2" + addcmd+=" $protocol" + fi + + $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $SNI $addcmd -sess_out $sess_data") /dev/null + ret1=$? + if [[ $ret1 -ne 0 ]]; then + debugme echo -n "Couldn't connect #1 " + return 7 + fi + if "$byID" && [[ ! "$OSSL_NAME" =~ LibreSSL ]] && \ + ( [[ $OSSL_VER_MAJOR.$OSSL_VER_MINOR == 1.1.1* ]] || [[ $OSSL_VER_MAJOR.$OSSL_VER_MINOR == 3.0.0* ]] ) && \ + [[ ! -s "$sess_data" ]]; then + # it seems OpenSSL indicates no Session ID resumption by just not generating output + debugme echo -n "No session resumption byID (empty file)" + ret=2 + else + $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $SNI $addcmd -sess_in $sess_data") $tmpfile 2>$ERRFILE + ret2=$? + if [[ $DEBUG -ge 2 ]]; then + echo -n "$ret1, $ret2, " + [[ -s "$sess_data" ]] && echo "not empty" || echo "empty" + fi + if [[ $ret2 -ne 0 ]]; then + debugme echo -n "Couldn't connect #2 " + return 7 + fi + # "Reused" indicates session material was reused, "New": not + if grep -aq "^Reused" "$tmpfile"; then + new_sid=false + elif grep -aq "^New" "$tmpfile"; then + new_sid=true + else + debugme echo -n "Problem with 2nd ServerHello " + not_new_reused=true + fi + # Now get the line and compare the numbers "read" and "written" as a second criteria. + # If the "read" number is bigger: a new session ID was probably used + rw_line="$(awk '/^SSL handshake has read/ { print $5" "$(NF-1) }' "$tmpfile" )" + rw_line=($rw_line) + if [[ "${rw_line[0]}" -gt "${rw_line[1]}" ]]; then + new_sid2=true + else + new_sid2=false + fi + debugme echo "${rw_line[0]}, ${rw_line[1]}" + + if "$new_sid2" && "$new_sid"; then + debugme echo -n "No session resumption " + ret=1 + elif ! "$new_sid2" && ! 
"$new_sid"; then + debugme echo -n "Session resumption " + ret=0 + else + debugme echo -n "unclear status: $ret1, $ret2, $new_sid, $new_sid2 -- " + ret=5 + fi + if [[ $DEBUG -ge 2 ]]; then + "$byID" && echo "byID" || echo "by ticket" + fi + fi + "$byID" && \ + tmpfile_handle ${FUNCNAME[0]}.byID.log $tmpfile || \ + tmpfile_handle ${FUNCNAME[0]}.byticket.log $tmpfile + return $ret +} + +run_server_preference() { + local cipher1="" cipher2="" tls13_cipher1="" tls13_cipher2="" default_proto="" + local prev_cipher="" default_cipher="" + local limitedsense="" supported_sslv2_ciphers + local -a cipher proto + local proto_ossl proto_txt proto_hex cipherlist i + local -i ret=0 j sclient_success str_len + local list_fwd="DHE-RSA-SEED-SHA:SEED-SHA:DES-CBC3-SHA:RC4-MD5:DES-CBC-SHA:RC4-SHA:AES128-SHA:AES128-SHA256:AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA:ECDH-RSA-DES-CBC3-SHA:ECDH-RSA-AES128-SHA:ECDH-RSA-AES256-SHA:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:DHE-DSS-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:AES256-SHA256:ECDHE-RSA-DES-CBC3-SHA:ECDHE-RSA-AES128-SHA256:AES256-GCM-SHA384:AES128-GCM-SHA256:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-SHA256:ADH-AES256-GCM-SHA384:AECDH-AES128-SHA:ECDHE-RSA-RC4-SHA:ECDHE-ECDSA-AES128-SHA" + local list_reverse="ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-RC4-SHA:AECDH-AES128-SHA:ADH-AES256-GCM-SHA384:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-GCM-SHA256:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-DES-CBC3-SHA:AES256-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDH-RSA-AES256-SHA:ECDH-RSA-AES128-SHA:ECDH-RSA-DES-CBC3-SHA:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA:ECDHE-RSA-AES128-SHA:AES256-SHA:AES128-SHA256:AES128-SHA:RC4-SHA:DES-CBC-SHA:RC4-MD5:DES-CBC3-SHA:SEED-SHA:DHE-RSA-SEED-SHA" + tls_list_fwd="c0,2c, c0,30, 00,9f, cc,a9, cc,a8, cc,aa, c0,2b, c0,2f, 00,9e, c0,24, c0,28, 00,6b, c0,23, c0,27, 00,67, c0,0a, 00,04, 00,05, 00,09, 00,0a, 00,9a, 00,96, + c0,14, 00,39, c0,09, c0,13, 00,33, 00,9d, 00,9c, 13,01, 13,02, 13,03, 13,04, 13,05, 00,3d, 00,3c, 00,35, 00,2f, 00,ff" + tls_list_rev="00,2f, 00,35, 00,3c, 00,3d, 13,05, 13,04, 13,03, 13,02, 13,01, 00,9c, 00,9d, 00,33, c0,13, c0,09, 00,39, c0,14, 00,96, 00,9a, 00,0a, 00,09, 00,05, 00,04, + c0,0a, 00,67, c0,27, c0,23, 00,6b, c0,28, c0,24, 00,9e, c0,2f, c0,2b, cc,aa, cc,a8, cc,a9, 00,9f, c0,30, c0,2c, 00,ff" + local has_cipher_order=false has_tls13_cipher_order=false + local addcmd="" addcmd2="" + local using_sockets=true + local jsonID="cipher_order" + local cwe="CWE-310" + local cve="" + + "$SSL_NATIVE" && using_sockets=false + + outln + pr_headlineln " Testing server preferences " + + outln + pr_bold " Has server cipher order? " + + if [[ "$OPTIMAL_PROTO" == -ssl2 ]]; then + addcmd="$OPTIMAL_PROTO" + else + # the supplied openssl will send an SSLv2 ClientHello if $SNI is empty + # and the -no_ssl2 isn't provided. 
+ addcmd="-no_ssl2 $SNI" + fi + + # Determine negotiated protocol upfront + sclient_success=1 + if "$using_sockets" && [[ $(has_server_protocol "tls1_3") -ne 1 ]]; then + # Send similar list of cipher suites as OpenSSL 1.1.1 does + tls_sockets "04" \ + "c0,2c, c0,30, 00,9f, cc,a9, cc,a8, cc,aa, c0,2b, c0,2f, 00,9a, 00,96, + 00,9e, c0,24, c0,28, 00,6b, c0,23, c0,27, 00,67, c0,0a, + c0,14, 00,39, c0,09, c0,13, 00,33, 00,9d, 00,9c, 13,02, + 13,03, 13,01, 13,04, 13,05, 00,3d, 00,3c, 00,35, 00,2f, 00,ff" \ + "ephemeralkey" + sclient_success=$? + if [[ $sclient_success -eq 0 ]]; then + add_tls_offered tls1_3 yes + elif [[ $sclient_success -eq 2 ]]; then + sclient_success=0 # 2: downgraded + case $DETECTED_TLS_VERSION in + 0303) add_tls_offered tls1_2 yes ;; + 0302) add_tls_offered tls1_1 yes ;; + 0301) add_tls_offered tls1 yes ;; + 0300) add_tls_offered ssl3 yes ;; + esac + fi + if [[ $sclient_success -eq 0 ]] ; then + cp "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" $TMPFILE + cp "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" "$TEMPDIR/$NODEIP.parse_tls13_serverhello.txt" + cipher0=$(get_cipher $TMPFILE) + fi + fi + if [[ $sclient_success -ne 0 ]]; then + $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $addcmd") >$ERRFILE >$TMPFILE + if sclient_connect_successful $? $TMPFILE; then + cipher0=$(get_cipher $TMPFILE) + debugme tm_out "0 --> $cipher0\n" + cp $TMPFILE "$TEMPDIR/$NODEIP.parse_tls13_serverhello.txt" + else + # 2 second try with $OPTIMAL_PROTO especially for intolerant IIS6 servers: + $OPENSSL s_client $(s_client_options "$STARTTLS $OPTIMAL_PROTO $BUGS -connect $NODEIP:$PORT $PROXY $SNI") >$ERRFILE >$TMPFILE + if ! sclient_connect_successful $? $TMPFILE; then + pr_warning "Handshake error!" + ret=1 + fi + fi + fi + default_proto=$(get_protocol $TMPFILE) + [[ "$default_proto" == TLSv1.0 ]] && default_proto="TLSv1" + # debugme tm_out " --> $default_proto\n" + + # Some servers don't have a TLS 1.3 cipher order, see #1163 + if [[ "$default_proto" == TLSv1.3 ]]; then + tls_sockets "04" "13,05, 13,04, 13,03, 13,02, 13,01, 00,ff" + [[ $? -ne 0 ]] && ret=1 && prln_fixme "something weird happened around line $((LINENO - 1))" + cp "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" $TMPFILE + tls13_cipher1=$(get_cipher $TMPFILE) + debugme tm_out "TLS 1.3: --> $tls13_cipher1\n" + tls_sockets "04" "13,01, 13,02, 13,03, 13,04, 13,05, 00,ff" + [[ $? -ne 0 ]] && ret=1 && prln_fixme "something weird happened around line $((LINENO - 1))" + cp "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" $TMPFILE + tls13_cipher2=$(get_cipher $TMPFILE) + debugme tm_out "TLS 1.3: --> $tls13_cipher2\n" + + [[ $tls13_cipher1 == $tls13_cipher2 ]] && has_tls13_cipher_order=true + fi + # Check whether the server has a cipher order for SSLv3 - TLSv1.2 + if [[ $(has_server_protocol "tls1_2") -ne 0 ]] && [[ $(has_server_protocol "tls1_1") -ne 0 ]] && \ + [[ $(has_server_protocol "tls1") -ne 0 ]] && [[ $(has_server_protocol "ssl3") -ne 0 ]]; then + # Based on testing performed by determine_optimal_sockets_params(), it is believed that + # this server does not offer SSLv3 - TLSv1.2. + has_cipher_order="$has_tls13_cipher_order" + elif [[ "$OPTIMAL_PROTO" != -ssl2 ]]; then + if [[ -n "$STARTTLS_OPTIMAL_PROTO" ]]; then + [[ ! 
"$STARTTLS_OPTIMAL_PROTO" =~ ssl ]] && addcmd2="$SNI" + [[ "$STARTTLS_OPTIMAL_PROTO" != -tls1_3 ]] && addcmd2+=" $STARTTLS_OPTIMAL_PROTO" + else + addcmd2="-no_ssl2 $SNI" + fi + [[ $DEBUG -ge 4 ]] && echo -e "\n Forward: ${list_fwd}" + $OPENSSL s_client $(s_client_options "$STARTTLS -cipher $list_fwd $BUGS -connect $NODEIP:$PORT $PROXY $addcmd2") $ERRFILE >$TMPFILE + if ! sclient_connect_successful $? $TMPFILE; then + list_fwd="$(actually_supported_osslciphers $list_fwd '' '-tls1')" + pr_warning "no matching cipher in this list found (pls report this): " + outln "$list_fwd . " + fileout "$jsonID" "WARN" "Could not determine server cipher order, no matching cipher in list found (pls report this): $list_fwd" + tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + # we assume the problem is with testing here but it could be also the server side + fi + cipher1=$(get_cipher $TMPFILE) # cipher1 from 1st serverhello + debugme tm_out "1 --> $cipher1\n" + + # second client hello with reverse list + [[ $DEBUG -ge 4 ]] && echo -e "\n Reverse: ${list_reverse}" + $OPENSSL s_client $(s_client_options "$STARTTLS -cipher $list_reverse $BUGS -connect $NODEIP:$PORT $PROXY $addcmd2") >$ERRFILE >$TMPFILE + # first handshake worked above so no error handling here + cipher2=$(get_cipher $TMPFILE) # cipher2 from 2nd serverhello + debugme tm_out "2 --> $cipher2\n" + + [[ $cipher1 == $cipher2 ]] && has_cipher_order=true + fi + debugme echo "has_cipher_order: $has_cipher_order" + debugme echo "has_tls13_cipher_order: $has_tls13_cipher_order" + + if "$TLS13_ONLY" && ! "$has_tls13_cipher_order"; then + out "no (TLS 1.3 only)" + limitedsense=" (limited sense as client will pick)" + fileout "$jsonID" "INFO" "not a cipher order for TLS 1.3 configured" + elif ! "$has_cipher_order" && ! "$has_tls13_cipher_order"; then + # server used the different ends (ciphers) from the client hello + pr_svrty_high "no (NOT ok)" + limitedsense=" (limited sense as client will pick)" + fileout "$jsonID" "HIGH" "NOT a cipher order configured" + elif "$has_cipher_order" && ! "$has_tls13_cipher_order" && [[ "$default_proto" == TLSv1.3 ]]; then + pr_svrty_good "yes (OK)"; out " -- only for < TLS 1.3" + fileout "$jsonID" "OK" "server -- TLS 1.3 client determined" + elif ! 
"$has_cipher_order" && "$has_tls13_cipher_order"; then + pr_svrty_high "no (NOT ok)"; out " -- only for TLS 1.3" + fileout "$jsonID" "HIGH" "server -- < TLS 1.3 client determined" + else + if "$has_tls13_cipher_order"; then + if "$TLS13_ONLY"; then + out "yes (TLS 1.3 only)" + fileout "$jsonID" "INFO" "server (TLS 1.3)" + else + pr_svrty_best "yes (OK)" + out " -- TLS 1.3 and below" + fileout "$jsonID" "OK" "server" + fi + else + # we don't have TLS 1.3 at all + pr_svrty_best "yes (OK)" + fileout "$jsonID" "OK" "server" + fi + fi + outln + + pr_bold " Negotiated protocol " + jsonID="protocol_negotiated" + + case "$default_proto" in + *TLSv1.3) + prln_svrty_best $default_proto + fileout "$jsonID" "OK" "Default protocol TLS1.3" + ;; + *TLSv1.2) + prln_svrty_best $default_proto + fileout "$jsonID" "OK" "Default protocol TLS1.2" + ;; + *TLSv1.1) + prln_svrty_low $default_proto + fileout "$jsonID" "LOW" "Default protocol TLS1.1" + ;; + *TLSv1) + prln_svrty_low $default_proto + fileout "$jsonID" "LOW" "Default protocol TLS1.0" + ;; + *SSLv2) + prln_svrty_critical $default_proto + fileout "$jsonID" "CRITICAL" "Default protocol SSLv2" + ;; + *SSLv3) + prln_svrty_critical $default_proto + fileout "$jsonID" "CRITICAL" "Default protocol SSLv3" + ;; + "") + pr_warning "default proto empty" + if [[ $OSSL_VER == 1.0.2* ]]; then + outln " (Hint: if IIS6 give OpenSSL 1.0.1 a try)" + fileout "$jsonID" "WARN" "Default protocol empty (Hint: if IIS6 give OpenSSL 1.0.1 a try)" + else + outln + fileout "$jsonID" "WARN" "Default protocol empty" + fi + ret=1 + ;; + *) + pr_warning "FIXME line $LINENO: $default_proto" + fileout "$jsonID" "WARN" "FIXME line $LINENO: $default_proto" + ret=1 + ;; + esac + + pr_bold " Negotiated cipher " + jsonID="cipher_negotiated" + + # restore file from above + [[ "$default_proto" == TLSv1.3 ]] && cp "$TEMPDIR/$NODEIP.parse_tls13_serverhello.txt" $TMPFILE + cipher1=$(get_cipher $TMPFILE) + + # Sanity check: Handshake with no ciphers and one with forward list didn't overlap + if [[ "$cipher0" != $cipher1 ]]; then + limitedsense=" (matching cipher in list missing)" + fi + + if [[ "$DISPLAY_CIPHERNAMES" =~ openssl ]] && ( [[ "$cipher1" == TLS_* ]] || [[ "$cipher1" == SSL_* ]] ); then + default_cipher="$(rfc2openssl "$cipher1")" + elif [[ "$DISPLAY_CIPHERNAMES" =~ rfc ]] && [[ "$cipher1" != TLS_* ]] && [[ "$cipher1" != SSL_* ]]; then + default_cipher="$(openssl2rfc "$cipher1")" + fi + [[ -z "$default_cipher" ]] && default_cipher="$cipher1" + pr_cipher_quality "$default_cipher" + case $? in + 1) fileout "$jsonID" "CRITICAL" "$default_cipher$(read_dhbits_from_file "$TMPFILE" "string") $limitedsense" + ;; + 2) fileout "$jsonID" "HIGH" "$default_cipher$(read_dhbits_from_file "$TMPFILE" "string") $limitedsense" + ;; + 3) fileout "$jsonID" "MEDIUM" "$default_cipher$(read_dhbits_from_file "$TMPFILE" "string") $limitedsense" + ;; + 6|7) fileout "$jsonID" "OK" "$default_cipher$(read_dhbits_from_file "$TMPFILE" "string") $limitedsense" + ;; # best ones + 4) fileout "$jsonID" "LOW" "$default_cipher$(read_dhbits_from_file "$TMPFILE" "string") (cbc) $limitedsense" + ;; # it's CBC. 
--> lucky13 + 0) pr_warning "default cipher empty" ; + if [[ $OSSL_VER == 1.0.2* ]]; then + out " (Hint: if IIS6 give OpenSSL 1.0.1 a try)" + fileout "$jsonID" "WARN" "Default cipher empty (if IIS6 give OpenSSL 1.0.1 a try) $limitedsense" + else + fileout "$jsonID" "WARN" "Default cipher empty $limitedsense" + fi + ret=1 + ;; + *) fileout "$jsonID" "INFO" "$default_cipher$(read_dhbits_from_file "$TMPFILE" "string") $limitedsense" + ;; + esac + read_dhbits_from_file "$TMPFILE" + + if [[ "$cipher0" != $cipher1 ]]; then + pr_warning " -- inconclusive test, matching cipher in list missing" + outln ", better see below" + #FIXME: This is ugly but the best we can do before rewrite this section + else + outln "$limitedsense" + fi + + if "$has_cipher_order"; then + "$FAST" && using_sockets=false + [[ $TLS_NR_CIPHERS == 0 ]] && using_sockets=false + + pr_bold " Cipher order" + while read proto_ossl proto_hex proto_txt; do + [[ "$proto_ossl" == tls1_3 ]] && ! "$has_tls13_cipher_order" && continue + cipher_pref_check "$proto_ossl" "$proto_hex" "$proto_txt" "$using_sockets" + done <<< "$(tm_out " ssl3 00 SSLv3\n tls1 01 TLSv1\n tls1_1 02 TLSv1.1\n tls1_2 03 TLSv1.2\n tls1_3 04 TLSv1.3\n")" + outln + outln + else + pr_bold " Negotiated cipher per proto"; outln " $limitedsense" + i=1 + for proto_ossl in ssl2 ssl3 tls1 tls1_1 tls1_2 tls1_3; do + if [[ $proto_ossl == ssl2 ]] && ! "$HAS_SSL2"; then + if ! "$using_sockets" || [[ $TLS_NR_CIPHERS -eq 0 ]]; then + out " (SSLv2: "; pr_local_problem "$OPENSSL doesn't support \"s_client -ssl2\""; outln ")"; + continue + else + sslv2_sockets "" "true" + if [[ $? -eq 3 ]] && [[ "$V2_HELLO_CIPHERSPEC_LENGTH" -ne 0 ]]; then + # Just arbitrarily pick the first cipher in the cipher-mapping.txt list. + proto[i]="SSLv2" + supported_sslv2_ciphers="$(grep "Supported cipher: " "$TEMPDIR/$NODEIP.parse_sslv2_serverhello.txt")" + for (( j=0; j < TLS_NR_CIPHERS; j++ )); do + if [[ "${TLS_CIPHER_SSLVERS[j]}" == "SSLv2" ]]; then + cipher1="${TLS_CIPHER_HEXCODE[j]}" + cipher1="$(tolower "x${cipher1:2:2}${cipher1:7:2}${cipher1:12:2}")" + if [[ "$supported_sslv2_ciphers" =~ $cipher1 ]]; then + if ( [[ "$DISPLAY_CIPHERNAMES" =~ openssl ]] && [[ "${TLS_CIPHER_OSSL_NAME[j]}" != "-" ]] ) || [[ "${TLS_CIPHER_RFC_NAME[j]}" == "-" ]]; then + cipher[i]="${TLS_CIPHER_OSSL_NAME[j]}" + else + cipher[i]="${TLS_CIPHER_RFC_NAME[j]}" + fi + break + fi + fi + done + [[ $DEBUG -ge 2 ]] && tmln_out "Default cipher for ${proto[i]}: ${cipher[i]}" + else + proto[i]="" + cipher[i]="" + fi + fi + elif ( [[ $proto_ossl == ssl3 ]] && ! "$HAS_SSL3" ) || ( [[ $proto_ossl == tls1_3 ]] && ! "$HAS_TLS13" ); then + if [[ $proto_ossl == ssl3 ]]; then + proto_txt="SSLv3" ; proto_hex="00" ; cipherlist="$TLS_CIPHER" + else + proto_txt="TLSv1.3" ; proto_hex="04" ; cipherlist="$TLS13_CIPHER" + fi + if ! "$using_sockets"; then + out " ($proto_txt: "; pr_local_problem "$OPENSSL doesn't support \"s_client -$proto_ossl\"" ; outln ")"; + continue + else + tls_sockets "$proto_hex" "$cipherlist" + if [[ $? 
-eq 0 ]]; then + proto[i]="$proto_txt" + cipher1=$(get_cipher "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt") + cipher[i]="$cipher1" + if [[ "$DISPLAY_CIPHERNAMES" =~ openssl ]] && [[ $TLS_NR_CIPHERS -ne 0 ]]; then + cipher[i]="$(rfc2openssl "$cipher1")" + [[ -z "${cipher[i]}" ]] && cipher[i]="$cipher1" + fi + [[ $DEBUG -ge 2 ]] && tmln_out "Default cipher for ${proto[i]}: ${cipher[i]}" + else + proto[i]="" + cipher[i]="" + fi + fi + else + $OPENSSL s_client $(s_client_options "$STARTTLS -"$proto_ossl" $BUGS -connect $NODEIP:$PORT $PROXY $SNI") >$ERRFILE >$TMPFILE + if sclient_connect_successful $? $TMPFILE; then + proto[i]=$(get_protocol $TMPFILE) + cipher[i]=$(get_cipher $TMPFILE) + [[ ${cipher[i]} == "0000" ]] && cipher[i]="" # Hack! + if [[ "$DISPLAY_CIPHERNAMES" =~ rfc ]] && [[ -n "${cipher[i]}" ]]; then + cipher[i]="$(openssl2rfc "${cipher[i]}")" + [[ -z "${cipher[i]}" ]] && cipher[i]=$(get_cipher $TMPFILE) + fi + [[ $DEBUG -ge 2 ]] && tmln_out "Default cipher for ${proto[i]}: ${cipher[i]}" + else + proto[i]="" + cipher[i]="" + fi + fi + [[ -n "${cipher[i]}" ]] && add_tls_offered "$proto_ossl" yes + i=$((i + 1)) + done + + for i in 1 2 3 4 5 6; do + if [[ -n "${cipher[i]}" ]]; then # cipher not empty + if [[ -z "$prev_cipher" ]] || [[ "$prev_cipher" != "${cipher[i]}" ]]; then + [[ -n "$prev_cipher" ]] && outln + str_len=${#cipher[i]} + out " " + if [[ "$COLOR" -le 2 ]]; then + out "${cipher[i]}" + else + pr_cipher_quality "${cipher[i]}" + fi + out ":" + if [[ "$DISPLAY_CIPHERNAMES" =~ openssl ]]; then + for (( 1; str_len < 30; str_len++ )); do + out " " + done + else + for (( 1; str_len < 51; str_len++ )); do + out " " + done + fi + else + out ", " # same cipher --> only print out protocol behind it + fi + out "${proto[i]}" + prev_cipher="${cipher[i]}" + fi + fileout "cipher_order_${proto[i]}" "INFO" "${cipher[i]} at ${proto[i]} $limitedsense" + done + outln "\n No further cipher order check has been done as order is determined by the client" + outln + fi + return $ret +} + +check_tls12_pref() { + local batchremoved="-CAMELLIA:-IDEA:-KRB5:-PSK:-SRP:-aNULL:-eNULL" + local batchremoved_success=false + local tested_cipher="" cipher ciphers_to_test + local order="" + local -i nr_ciphers_found_r1=0 nr_ciphers_found_r2=0 + + while true; do + $OPENSSL s_client $(s_client_options "$STARTTLS -tls1_2 $BUGS -cipher "ALL$tested_cipher:$batchremoved" -connect $NODEIP:$PORT $PROXY $SNI") >$ERRFILE >$TMPFILE + if sclient_connect_successful $? $TMPFILE ; then + cipher=$(get_cipher $TMPFILE) + order+=" $cipher" + tested_cipher="$tested_cipher:-$cipher" + nr_ciphers_found_r1+=1 + "$FAST" && break + else + debugme tmln_out "A: $tested_cipher" + break + fi + done + batchremoved="${batchremoved//-/}" + while true; do + # no ciphers from "ALL$tested_cipher:$batchremoved" left + # now we check $batchremoved, and remove the minus signs first: + $OPENSSL s_client $(s_client_options "$STARTTLS -tls1_2 $BUGS -cipher "$batchremoved" -connect $NODEIP:$PORT $PROXY $SNI") >$ERRFILE >$TMPFILE + if sclient_connect_successful $? 
$TMPFILE ; then + batchremoved_success=true # signals that we have some of those ciphers and need to put everything together later on + cipher=$(get_cipher $TMPFILE) + order+=" $cipher" + batchremoved="$batchremoved:-$cipher" + nr_ciphers_found_r1+=1 + debugme tmln_out "B1: $batchremoved" + "$FAST" && break + else + debugme tmln_out "B2: $batchremoved" + break + # nothing left with batchremoved ciphers, we need to put everything together + fi + done + + if "$batchremoved_success"; then + # now we combine the two cipher sets from both while loops + combined_ciphers="$order" + order="" ; tested_cipher="" + while true; do + ciphers_to_test="" + for cipher in $combined_ciphers; do + [[ ! "$tested_cipher:" =~ :-$cipher: ]] && ciphers_to_test+=":$cipher" + done + [[ -z "$ciphers_to_test" ]] && break + $OPENSSL s_client $(s_client_options "$STARTTLS -tls1_2 $BUGS -cipher "${ciphers_to_test:1}" -connect $NODEIP:$PORT $PROXY $SNI") >$ERRFILE >$TMPFILE + if sclient_connect_successful $? $TMPFILE ; then + cipher=$(get_cipher $TMPFILE) + order+=" $cipher" + tested_cipher="$tested_cipher:-$cipher" + nr_ciphers_found_r2+=1 + "$FAST" && break + else + # This shouldn't happen. + break + fi + done + if "$FAST" && [[ $nr_ciphers_found_r2 -ne 1 ]]; then + prln_fixme "something weird happened around line $((LINENO - 14))" + return 1 + elif ! "$FAST" && [[ $nr_ciphers_found_r2 -ne $nr_ciphers_found_r1 ]]; then + prln_fixme "something weird happened around line $((LINENO - 16))" + return 1 + fi + fi + tm_out "$order" + + tmpfile_handle ${FUNCNAME[0]}.txt + return 0 +} + + +cipher_pref_check() { + local p="$1" proto_hex="$2" proto="$3" + local using_sockets="$4" + local tested_cipher cipher order rfc_cipher rfc_order + local overflow_probe_cipherlist="ALL:-ECDHE-RSA-AES256-GCM-SHA384:-AES128-SHA:-DES-CBC3-SHA" + local -i i nr_ciphers nr_nonossl_ciphers num_bundles mod_check bundle_size bundle end_of_bundle success + local hexc ciphers_to_test + local -a rfc_ciph hexcode ciphers_found ciphers_found2 + local -a -i index + local ciphers_found_with_sockets + + order=""; ciphers_found_with_sockets=false + if [[ $p == ssl3 ]] && ! "$HAS_SSL3" && ! "$using_sockets"; then + out "\n SSLv3: "; pr_local_problem "$OPENSSL doesn't support \"s_client -ssl3\""; + return 0 + fi + if [[ $p == tls1_3 ]] && ! "$HAS_TLS13" && ! "$using_sockets"; then + out "\n TLSv1.3 "; pr_local_problem "$OPENSSL doesn't support \"s_client -tls1_3\""; + return 0 + fi + + [[ $(has_server_protocol "$p") -eq 1 ]] && return 0 + + if ( [[ $p != tls1_3 ]] || "$HAS_TLS13" ) && ( [[ $p != ssl3 ]] || "$HAS_SSL3" ); then + if [[ $p == tls1_2 ]] && "$SERVER_SIZE_LIMIT_BUG"; then + order="$(check_tls12_pref)" + else + tested_cipher="" + while true; do + if [[ $p != tls1_3 ]]; then + ciphers_to_test="-cipher ALL:COMPLEMENTOFALL${tested_cipher}" + else + ciphers_to_test="" + for cipher in $(colon_to_spaces "$TLS13_OSSL_CIPHERS"); do + [[ ! "$tested_cipher" =~ ":-"$cipher ]] && ciphers_to_test+=":$cipher" + done + [[ -z "$ciphers_to_test" ]] && break + ciphers_to_test="-ciphersuites ${ciphers_to_test:1}" + fi + $OPENSSL s_client $(s_client_options "$STARTTLS -"$p" $BUGS $ciphers_to_test -connect $NODEIP:$PORT $PROXY $SNI") >$ERRFILE >$TMPFILE + sclient_connect_successful $? 
$TMPFILE || break + cipher=$(get_cipher $TMPFILE) + [[ -z "$cipher" ]] && break + order+="$cipher " + tested_cipher+=":-"$cipher + "$FAST" && break + done + fi + fi + + nr_nonossl_ciphers=0 + if "$using_sockets"; then + for (( i=0; i < TLS_NR_CIPHERS; i++ )); do + ciphers_found[i]=false + hexc="${TLS_CIPHER_HEXCODE[i]}" + if [[ ${#hexc} -eq 9 ]]; then + if [[ " $order " =~ " ${TLS_CIPHER_OSSL_NAME[i]} " ]]; then + ciphers_found[i]=true + else + ciphers_found2[nr_nonossl_ciphers]=false + hexcode[nr_nonossl_ciphers]="${hexc:2:2},${hexc:7:2}" + rfc_ciph[nr_nonossl_ciphers]="${TLS_CIPHER_RFC_NAME[i]}" + index[nr_nonossl_ciphers]=$i + # Only test ciphers that are relevant to the protocol. + if [[ "$p" == tls1_3 ]]; then + [[ "${hexc:2:2}" == "13" ]] && nr_nonossl_ciphers+=1 + elif [[ "$p" == tls1_2 ]]; then + [[ "${hexc:2:2}" != 13 ]] && nr_nonossl_ciphers+=1 + elif [[ ! "${TLS_CIPHER_RFC_NAME[i]}" =~ SHA256 ]] && \ + [[ ! "${TLS_CIPHER_RFC_NAME[i]}" =~ SHA384 ]] && \ + [[ "${TLS_CIPHER_RFC_NAME[i]}" != *"_CCM" ]] && \ + [[ "${TLS_CIPHER_RFC_NAME[i]}" != *"_CCM_8" ]]; then + nr_nonossl_ciphers+=1 + fi + fi + fi + done + fi + + if [[ $nr_nonossl_ciphers -eq 0 ]]; then + num_bundles=0 + elif [[ $p != tls1_2 ]] || ! "$SERVER_SIZE_LIMIT_BUG"; then + num_bundles=1 + bundle_size=$nr_nonossl_ciphers + else + num_bundles=$nr_nonossl_ciphers/128 + mod_check=$nr_nonossl_ciphers%128 + [[ $mod_check -ne 0 ]] && num_bundles=$num_bundles+1 + + bundle_size=$nr_nonossl_ciphers/$num_bundles + mod_check=$nr_nonossl_ciphers%$num_bundles + [[ $mod_check -ne 0 ]] && bundle_size+=1 + fi + + for (( bundle=0; bundle < num_bundles; bundle++ )); do + end_of_bundle=$bundle*$bundle_size+$bundle_size + [[ $end_of_bundle -gt $nr_nonossl_ciphers ]] && end_of_bundle=$nr_nonossl_ciphers + while true; do + ciphers_to_test="" + for (( i=bundle*bundle_size; i < end_of_bundle; i++ )); do + ! "${ciphers_found2[i]}" && ciphers_to_test+=", ${hexcode[i]}" + done + [[ -z "$ciphers_to_test" ]] && break + tls_sockets "$proto_hex" "${ciphers_to_test:2}, 00,ff" "ephemeralkey" + [[ $? -ne 0 ]] && break + cipher=$(get_cipher "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt") + for (( i=bundle*bundle_size; i < end_of_bundle; i++ )); do + [[ "$cipher" == "${rfc_ciph[i]}" ]] && ciphers_found2[i]=true && break + done + i=${index[i]} + ciphers_found[i]=true + ciphers_found_with_sockets=true + if [[ $p != tls1_2 ]] || ! "$SERVER_SIZE_LIMIT_BUG"; then + # Throw out the results found so far and start over using just sockets + bundle=$num_bundles + for (( i=0; i < TLS_NR_CIPHERS; i++ )); do + ciphers_found[i]=true + done + break + fi + done + done + + # If additional ciphers were found using sockets and there is no + # SERVER_SIZE_LIMIT_BUG, then just use sockets to find the cipher order. + # If there is a SERVER_SIZE_LIMIT_BUG, then use sockets to find the cipher + # order, but starting with the list of ciphers supported by the server. + if "$ciphers_found_with_sockets"; then + order="" + nr_ciphers=0 + for (( i=0; i < TLS_NR_CIPHERS; i++ )); do + hexc="${TLS_CIPHER_HEXCODE[i]}" + if "${ciphers_found[i]}" && [[ ${#hexc} -eq 9 ]]; then + ciphers_found2[nr_ciphers]=false + hexcode[nr_ciphers]="${hexc:2:2},${hexc:7:2}" + rfc_ciph[nr_ciphers]="${TLS_CIPHER_RFC_NAME[i]}" + if [[ "$p" == "tls1_3" ]]; then + [[ "${hexc:2:2}" == "13" ]] && nr_ciphers+=1 + elif [[ "$p" == "tls1_2" ]]; then + [[ "${hexc:2:2}" != "13" ]] && nr_ciphers+=1 + elif [[ ! "${TLS_CIPHER_RFC_NAME[i]}" =~ SHA256 ]] && \ + [[ ! 
"${TLS_CIPHER_RFC_NAME[i]}" =~ SHA384 ]] && \ + [[ "${TLS_CIPHER_RFC_NAME[i]}" != *"_CCM" ]] && \ + [[ "${TLS_CIPHER_RFC_NAME[i]}" != *"_CCM_8" ]]; then + nr_ciphers+=1 + fi + fi + done + while true; do + ciphers_to_test="" + for (( i=0; i < nr_ciphers; i++ )); do + ! "${ciphers_found2[i]}" && ciphers_to_test+=", ${hexcode[i]}" + done + [[ -z "$ciphers_to_test" ]] && break + tls_sockets "$proto_hex" "${ciphers_to_test:2}, 00,ff" "ephemeralkey" + [[ $? -ne 0 ]] && break + cipher=$(get_cipher "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt") + for (( i=0; i < nr_ciphers; i++ )); do + [[ "$cipher" == ${rfc_ciph[i]} ]] && ciphers_found2[i]=true && break + done + if [[ "$DISPLAY_CIPHERNAMES" =~ openssl ]] && [[ $TLS_NR_CIPHERS -ne 0 ]]; then + cipher="$(rfc2openssl "$cipher")" + # If there is no OpenSSL name for the cipher, then use the RFC name + [[ -z "$cipher" ]] && cipher=$(get_cipher "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt") + fi + order+="$cipher " + done + elif [[ -n "$order" ]] && [[ "$DISPLAY_CIPHERNAMES" =~ rfc ]]; then + rfc_order="" + while read -d " " cipher; do + rfc_cipher="$(openssl2rfc "$cipher")" + if [[ -n "$rfc_cipher" ]]; then + rfc_order+="$rfc_cipher " + else + rfc_order+="$cipher " + fi + done <<< "$order" + order="$rfc_order" + fi + + if [[ -n "$order" ]]; then + add_tls_offered "$p" yes + outln + out "$(printf " %-10s " "$proto: ")" + if [[ "$COLOR" -le 2 ]]; then + out "$(out_row_aligned_max_width "$order" " " $TERM_WIDTH)" + else + out_row_aligned_max_width_by_entry "$order" " " $TERM_WIDTH pr_cipher_quality + fi + fileout "cipherorder_${proto//./_}" "INFO" "$order" + fi + + tmpfile_handle ${FUNCNAME[0]}-$p.txt + return 0 +} + + +# arg1 is OpenSSL s_client parameter or empty +# +get_host_cert() { + local tmpvar=$TEMPDIR/${FUNCNAME[0]}.txt # change later to $TMPFILE + + $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $SNI $1") 2>/dev/null $tmpvar + if sclient_connect_successful $? $tmpvar; then + awk '/-----BEGIN/,/-----END/ { print $0 }' $tmpvar >$HOSTCERT + return 0 + else + if [[ -z "$1" ]]; then + prln_warning "could not retrieve host certificate!" + fileout "host_certificate_Problem" "WARN" "Could not retrieve host certificate!" 
+ fi + return 1 + fi + #tmpfile_handle ${FUNCNAME[0]}.txt + #return $((${PIPESTATUS[0]} + ${PIPESTATUS[1]})) +} + +verify_retcode_helper() { + local ret=0 + local -i retcode=$1 + + case $retcode in + # codes from ./doc/apps/verify.pod | verify(1ssl) + 44) tm_out "(different CRL scope)" ;; # X509_V_ERR_DIFFERENT_CRL_SCOPE + 26) tm_out "(unsupported certificate purpose)" ;; # X509_V_ERR_INVALID_PURPOSE + 24) tm_out "(certificate unreadable)" ;; # X509_V_ERR_INVALID_CA + 23) tm_out "(certificate revoked)" ;; # X509_V_ERR_CERT_REVOKED + 21) tm_out "(chain incomplete, only 1 cert provided)" ;; # X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE + 20) tm_out "(chain incomplete)" ;; # X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY + 19) tm_out "(self signed CA in chain)" ;; # X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN + 18) tm_out "(self signed)" ;; # X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT + 10) tm_out "(expired)" ;; # X509_V_ERR_CERT_HAS_EXPIRED + 9) tm_out "(not yet valid)" ;; # X509_V_ERR_CERT_NOT_YET_VALID + 2) tm_out "(issuer cert missing)" ;; # X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT + *) ret=1 ; tm_out " (unknown, pls report) $1" ;; + esac + return $ret +} + +# arg1: number of certificate if provided >1 +determine_trust() { + local jsonID="$1" + local json_postfix="$2" + local -i i=1 + local -i num_ca_bundles=0 + local bundle_fname="" + local -a certificate_file verify_retcode trust + local ok_was="" + local notok_was="" + local all_ok=true + local some_ok=false + local code + local ca_bundles="" + local spaces=" " + local -i certificates_provided=1+$(grep -c "\-\-\-\-\-BEGIN CERTIFICATE\-\-\-\-\-" $TEMPDIR/intermediatecerts.pem) + local addtl_warning + + # If $json_postfix is not empty, then there is more than one certificate + # and the output should should be indented by two more spaces. + [[ -n $json_postfix ]] && spaces=" " + + case $OSSL_VER_MAJOR.$OSSL_VER_MINOR in + 1.0.2|1.1.0|1.1.1|2.[1-9].*|3.*) # 2.x is LibreSSL. 2.1.1 was tested to work, below is not sure + : + ;; + *) addtl_warning="Your $OPENSSL <= 1.0.2 might be too unreliable to determine trust" + fileout "${jsonID}${json_postfix}" "WARN" "$addtl_warning" + addtl_warning="(${addtl_warning})" + ;; + esac + debugme tmln_out + + # if you run testssl.sh from a different path /you can set either TESTSSL_INSTALL_DIR or CA_BUNDLES_PATH to find the CA BUNDLES + if [[ -z "$CA_BUNDLES_PATH" ]]; then + ca_bundles="$TESTSSL_INSTALL_DIR/etc/testssl/*.pem" + else + ca_bundles="$CA_BUNDLES_PATH/*.pem" + fi + for bundle_fname in $ca_bundles; do + certificate_file[i]=$(basename ${bundle_fname//.pem}) + if [[ ! -r $bundle_fname ]]; then + prln_warning "\"$bundle_fname\" cannot be found / not readable" + return 1 + fi + debugme printf -- " %-12s" "${certificate_file[i]}" + # Set SSL_CERT_DIR to /dev/null so that $OPENSSL verify will only use certificates in $bundle_fname + # in a subshell because that should be valid here only + (export SSL_CERT_DIR="/dev/null"; export SSL_CERT_FILE="/dev/null" + if [[ $certificates_provided -ge 2 ]]; then + $OPENSSL verify -purpose sslserver -CAfile <(cat $ADDITIONAL_CA_FILES "$bundle_fname") -untrusted $TEMPDIR/intermediatecerts.pem $HOSTCERT >$TEMPDIR/${certificate_file[i]}.1 2>$TEMPDIR/${certificate_file[i]}.2 + else + $OPENSSL verify -purpose sslserver -CAfile <(cat $ADDITIONAL_CA_FILES "$bundle_fname") $HOSTCERT >$TEMPDIR/${certificate_file[i]}.1 2>$TEMPDIR/${certificate_file[i]}.2 + fi) + verify_retcode[i]=$(awk '/error [1-9][0-9]? 
at [0-9]+ depth lookup:/ { if (!found) {print $2; found=1} }' $TEMPDIR/${certificate_file[i]}.1 $TEMPDIR/${certificate_file[i]}.2) + [[ -z "${verify_retcode[i]}" ]] && verify_retcode[i]=0 + if [[ ${verify_retcode[i]} -eq 0 ]]; then + trust[i]=true + some_ok=true + [[ -z "$GOOD_CA_BUNDLE" ]] && GOOD_CA_BUNDLE="$bundle_fname" + debugme tm_svrty_good "Ok " + debugme tmln_out "${verify_retcode[i]}" + else + trust[i]=false + all_ok=false + debugme tm_svrty_high "not trusted " + debugme tmln_out "${verify_retcode[i]}" + fi + ((i++)) + done + num_ca_bundles=$((i - 1)) + debugme tm_out " " + if "$all_ok"; then + # all stores ok + pr_svrty_good "Ok "; pr_warning "$addtl_warning" + # we did to stdout the warning above already, so we could stay here with OK: + fileout "${jsonID}${json_postfix}" "OK" "passed. $addtl_warning" + else + # at least one failed + pr_svrty_critical "NOT ok" + if ! "$some_ok"; then + # all failed (we assume with the same issue), we're displaying the reason + out " " + code="$(verify_retcode_helper "${verify_retcode[1]}")" + if [[ "$code" =~ "pls report" ]]; then + pr_warning "$code" + else + out "$code" + fi + fileout "${jsonID}${json_postfix}" "CRITICAL" "failed $code. $addtl_warning" + else + # is one ok and the others not ==> display the culprit store + if "$some_ok"; then + pr_svrty_critical ":" + for ((i=1;i<=num_ca_bundles;i++)); do + if ${trust[i]}; then + ok_was="${certificate_file[i]} $ok_was" + else + #code="$(verify_retcode_helper ${verify_retcode[i]})" + #notok_was="${certificate_file[i]} $notok_was" + pr_svrty_high " ${certificate_file[i]} " + code="$(verify_retcode_helper "${verify_retcode[i]}")" + if [[ "$code" =~ "pls report" ]]; then + pr_warning "$code" + else + out "$code" + fi + notok_was="${certificate_file[i]} $code $notok_was" + fi + done + #pr_svrty_high "$notok_was " + #outln "$code" + outln + # lf + green ones + [[ "$DEBUG" -eq 0 ]] && tm_out "$spaces" + pr_svrty_good "OK: $ok_was" + fi + fileout "${jsonID}${json_postfix}" "CRITICAL" "Some certificate trust checks failed -> $notok_was $addtl_warning, OK -> $ok_was" + fi + [[ -n "$addtl_warning" ]] && out "\n$spaces" && pr_warning "$addtl_warning" + fi + outln + return 0 +} + +# not handled: Root CA supplied ("contains anchor" in SSLlabs terminology) + +tls_time() { + local difftime + local spaces=" " + local jsonID="TLS_timestamp" + + pr_bold " TLS clock skew" ; out "$spaces" + + if ( [[ "$STARTTLS_PROTOCOL" =~ ldap ]] || [[ "$STARTTLS_PROTOCOL" =~ irc ]] ); then + prln_local_problem "STARTTLS/$STARTTLS_PROTOCOL and --ssl-native collide here" + return 1 + fi + + TLS_DIFFTIME_SET=true # this is a switch whether we want to measure the remote TLS_TIME + tls_sockets "01" "$TLS_CIPHER" # try first TLS 1.0 (most frequently used protocol) + [[ -z "$TLS_TIME" ]] && tls_sockets "03" "$TLS12_CIPHER" # TLS 1.2 + [[ -z "$TLS_TIME" ]] && tls_sockets "02" "$TLS_CIPHER" # TLS 1.1 + [[ -z "$TLS_TIME" ]] && tls_sockets "00" "$TLS_CIPHER" # SSL 3 + + if [[ -n "$TLS_TIME" ]]; then # nothing returned a time! + difftime=$((TLS_TIME - TLS_NOW)) # TLS_NOW has been set in tls_sockets() + if [[ "${#difftime}" -gt 5 ]]; then + # openssl >= 1.0.1f fills this field with random values! 
--> good for possible fingerprint + out "Random values, no fingerprinting possible " + fileout "$jsonID" "INFO" "random" + else + [[ $difftime != "-"* ]] && [[ $difftime != "0" ]] && difftime="+$difftime" + out "$difftime"; out " sec from localtime"; + fileout "$jsonID" "INFO" "off by $difftime seconds from your localtime" + fi + debugme tm_out "$TLS_TIME" + outln + else + outln "SSLv3 through TLS 1.2 didn't return a timestamp" + fileout "$jsonID" "INFO" "None returned by SSLv3 through TLSv1.2" + fi + TLS_DIFFTIME_SET=false # reset the switch to save calls to date and friend in tls_sockets() + return 0 +} + +# core function determining whether handshake succeeded or not +# arg1: return value of "openssl s_client connect" +# arg2: temporary file with the server hello +# returns 0 if connect was successful, 1 if not +# +sclient_connect_successful() { + local server_hello="$(cat -v "$2")" + local re='Master-Key: ([^\ +]*)' + + [[ $1 -eq 0 ]] && return 0 + if [[ "$server_hello" =~ $re ]]; then + [[ -n "${BASH_REMATCH[1]}" ]] && return 0 + fi + [[ "$server_hello" =~ (New|Reused)", "(SSLv[23]|TLSv1(\.[0-3])?(\/SSLv3)?)", Cipher is "([A-Z0-9]+-[A-Za-z0-9\-]+|TLS_[A-Za-z0-9_]+) ]] && return 0 + # what's left now is: master key empty and Session-ID not empty + # ==> probably client-based auth with x509 certificate. We handle that at other places + # + # For robustness we also detected here network / server connectivity problems: + # Just need to check whether $TMPFILE=$2 is empty + if [[ ! -s "$2" ]]; then + ((NR_OSSL_FAIL++)) + connectivity_problem $NR_OSSL_FAIL $MAX_OSSL_FAIL "openssl s_client connect problem" "repeated openssl s_client connect problem, doesn't make sense to continue" + fi + return 1 +} + +extract_new_tls_extensions() { + local tls_extensions + + # this is not beautiful (grep+sed) + # but maybe we should just get the ids and do a private matching, according to + # https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml + tls_extensions=$(grep -a 'TLS server extension ' "$1" | \ + sed -e 's/TLS server extension //g' -e 's/\" (id=/\/#/g' \ + -e 's/,.*$/,/g' -e 's/),$/\"/g' \ + -e 's/elliptic curves\/#10/supported_groups\/#10/g') + tls_extensions=$(echo $tls_extensions) # into one line + + if [[ -n "$tls_extensions" ]]; then + # check to see if any new TLS extensions were returned and add any new ones to TLS_EXTENSIONS + while read -d "\"" -r line; do + if [[ $line != "" ]] && [[ ! "$TLS_EXTENSIONS" =~ "$line" ]]; then +#FIXME: This is a string of quoted strings, so this seems to determine the output format already. Better e.g. would be an array + TLS_EXTENSIONS+=" \"${line}\"" + fi + done <<<$tls_extensions + [[ "${TLS_EXTENSIONS:0:1}" == " " ]] && TLS_EXTENSIONS="${TLS_EXTENSIONS:1}" + fi +} + +# Note that since, at the moment, this function is only called by run_server_defaults() +# and run_heartbleed(), this function does not look for the status request or NPN +# extensions. For run_heartbleed(), only the heartbeat extension needs to be detected. +# For run_server_defaults(), the status request and NPN would already be detected by +# get_server_certificate(), if they are supported. In the case of the status extension, +# since including a status request extension in a ClientHello does not work for GOST +# only servers. In the case of NPN, since a server will not include both the NPN and +# ALPN extensions in the same ServerHello. 
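+# When sockets are used, the ClientHello built below asks for max_fragment_length (1), client_certificate_url (2),
+# truncated_hmac (4), signed_certificate_timestamp (18), encrypt_then_mac (22) and extended_master_secret (23)
+# -- see the hex IDs in $tls_extensions -- plus ALPN (16) unless STARTTLS is in use.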
+# +determine_tls_extensions() { + local addcmd + local -i success=1 + local line params="" tls_extensions="" + local alpn_proto alpn="" alpn_list_len_hex alpn_extn_len_hex + local -i alpn_list_len alpn_extn_len + local cbc_cipher_list="ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DH-RSA-AES256-SHA256:DH-DSS-AES256-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DH-RSA-AES256-SHA:DH-DSS-AES256-SHA:ECDHE-RSA-CAMELLIA256-SHA384:ECDHE-ECDSA-CAMELLIA256-SHA384:DHE-RSA-CAMELLIA256-SHA256:DHE-DSS-CAMELLIA256-SHA256:DH-RSA-CAMELLIA256-SHA256:DH-DSS-CAMELLIA256-SHA256:DHE-RSA-CAMELLIA256-SHA:DHE-DSS-CAMELLIA256-SHA:DH-RSA-CAMELLIA256-SHA:DH-DSS-CAMELLIA256-SHA:ECDH-RSA-AES256-SHA384:ECDH-ECDSA-AES256-SHA384:ECDH-RSA-AES256-SHA:ECDH-ECDSA-AES256-SHA:ECDH-RSA-CAMELLIA256-SHA384:ECDH-ECDSA-CAMELLIA256-SHA384:AES256-SHA256:AES256-SHA:CAMELLIA256-SHA256:CAMELLIA256-SHA:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DH-RSA-AES128-SHA256:DH-DSS-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:DH-RSA-AES128-SHA:DH-DSS-AES128-SHA:ECDHE-RSA-CAMELLIA128-SHA256:ECDHE-ECDSA-CAMELLIA128-SHA256:DHE-RSA-CAMELLIA128-SHA256:DHE-DSS-CAMELLIA128-SHA256:DH-RSA-CAMELLIA128-SHA256:DH-DSS-CAMELLIA128-SHA256:DHE-RSA-SEED-SHA:DHE-DSS-SEED-SHA:DH-RSA-SEED-SHA:DH-DSS-SEED-SHA:DHE-RSA-CAMELLIA128-SHA:DHE-DSS-CAMELLIA128-SHA:DH-RSA-CAMELLIA128-SHA:DH-DSS-CAMELLIA128-SHA:ECDH-RSA-AES128-SHA256:ECDH-ECDSA-AES128-SHA256:ECDH-RSA-AES128-SHA:ECDH-ECDSA-AES128-SHA:ECDH-RSA-CAMELLIA128-SHA256:ECDH-ECDSA-CAMELLIA128-SHA256:AES128-SHA256:AES128-SHA:CAMELLIA128-SHA256:SEED-SHA:CAMELLIA128-SHA:IDEA-CBC-SHA:ECDHE-RSA-DES-CBC3-SHA:ECDHE-ECDSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:EDH-DSS-DES-CBC3-SHA:DH-RSA-DES-CBC3-SHA:DH-DSS-DES-CBC3-SHA:ECDH-RSA-DES-CBC3-SHA:ECDH-ECDSA-DES-CBC3-SHA:DES-CBC3-SHA:EXP1024-DHE-DSS-DES-CBC-SHA:EDH-RSA-DES-CBC-SHA:EDH-DSS-DES-CBC-SHA:DH-RSA-DES-CBC-SHA:DH-DSS-DES-CBC-SHA:EXP1024-DES-CBC-SHA:DES-CBC-SHA:EXP-EDH-RSA-DES-CBC-SHA:EXP-EDH-DSS-DES-CBC-SHA:EXP-DES-CBC-SHA:EXP-RC2-CBC-MD5:EXP-DH-DSS-DES-CBC-SHA:EXP-DH-RSA-DES-CBC-SHA" + local cbc_cipher_list_hex="c0,28, c0,24, c0,14, c0,0a, 00,6b, 00,6a, 00,69, 00,68, 00,39, 00,38, 00,37, 00,36, c0,77, c0,73, 00,c4, 00,c3, 00,c2, 00,c1, 00,88, 00,87, 00,86, 00,85, c0,2a, c0,26, c0,0f, c0,05, c0,79, c0,75, 00,3d, 00,35, 00,c0, 00,84, c0,3d, c0,3f, c0,41, c0,43, c0,45, c0,49, c0,4b, c0,4d, c0,4f, c0,27, c0,23, c0,13, c0,09, 00,67, 00,40, 00,3f, 00,3e, 00,33, 00,32, 00,31, 00,30, c0,76, c0,72, 00,be, 00,bd, 00,bc, 00,bb, 00,9a, 00,99, 00,98, 00,97, 00,45, 00,44, 00,43, 00,42, c0,29, c0,25, c0,0e, c0,04, c0,78, c0,74, 00,3c, 00,2f, 00,ba, 00,96, 00,41, 00,07, c0,3c, c0,3e, c0,40, c0,42, c0,44, c0,48, c0,4a, c0,4c, c0,4e, c0,12, c0,08, 00,16, 00,13, 00,10, 00,0d, c0,0d, c0,03, 00,0a, fe,ff, ff,e0, 00,63, 00,15, 00,12, 00,0f, 00,0c, 00,62, 00,09, fe,fe, ff,e1, 00,14, 00,11, 00,08, 00,06, 00,0b, 00,0e" + local using_sockets=true + + [[ "$OPTIMAL_PROTO" == -ssl2 ]] && return 0 + "$SSL_NATIVE" && using_sockets=false + + if "$using_sockets"; then + tls_extensions="00,01,00,01,02, 00,02,00,00, 00,04,00,00, 00,12,00,00, 00,16,00,00, 00,17,00,00" + if [[ -z $STARTTLS ]]; then + for alpn_proto in $ALPN_PROTOs; do + alpn+=",$(printf "%02x" ${#alpn_proto}),$(string_to_asciihex "$alpn_proto")" + done + alpn_list_len=${#alpn}/3 + alpn_list_len_hex=$(printf "%04x" $alpn_list_len) + 
alpn_extn_len=$alpn_list_len+2 + alpn_extn_len_hex=$(printf "%04x" $alpn_extn_len) + tls_extensions+=", 00,10,${alpn_extn_len_hex:0:2},${alpn_extn_len_hex:2:2},${alpn_list_len_hex:0:2},${alpn_list_len_hex:2:2}$alpn" + fi + if [[ ! "$TLS_EXTENSIONS" =~ encrypt-then-mac ]]; then + tls_sockets "03" "$cbc_cipher_list_hex, 00,ff" "all" "$tls_extensions" + success=$? + fi + if [[ $success -ne 0 ]] && [[ $success -ne 2 ]]; then + tls_sockets "03" "$TLS12_CIPHER" "all" "$tls_extensions" + success=$? + fi + [[ $success -eq 2 ]] && success=0 + [[ $success -eq 0 ]] && extract_new_tls_extensions "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" + if [[ -r "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" ]]; then + cp "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" $TMPFILE + tmpfile_handle ${FUNCNAME[0]}.txt + fi + else + if "$HAS_ALPN" && [[ -z $STARTTLS ]]; then + params="-alpn \"${ALPN_PROTOs// /,}\"" # we need to replace " " by "," + elif "$HAS_NPN" && [[ -z $STARTTLS ]]; then + params="-nextprotoneg \"$NPN_PROTOs\"" + fi + if [[ -z "$OPTIMAL_PROTO" ]] && [[ -z "$SNI" ]] && "$HAS_NO_SSL2"; then + addcmd="-no_ssl2" + else + addcmd="$SNI" + fi + if [[ ! "$TLS_EXTENSIONS" =~ encrypt-then-mac ]]; then + $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $addcmd $OPTIMAL_PROTO -tlsextdebug $params -cipher $cbc_cipher_list") $ERRFILE >$TMPFILE + sclient_connect_successful $? $TMPFILE + success=$? + fi + if [[ $success -ne 0 ]]; then + $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $addcmd $OPTIMAL_PROTO -tlsextdebug $params") $ERRFILE >$TMPFILE + sclient_connect_successful $? $TMPFILE + success=$? + fi + [[ $success -eq 0 ]] && extract_new_tls_extensions $TMPFILE + tmpfile_handle ${FUNCNAME[0]}.txt + fi + return $success +} + +extract_certificates() { + local version="$1" + local savedir + local -i i success nrsaved=0 + local issuerDN CAsubjectDN previssuerDN + + # Place the server's certificate in $HOSTCERT and any intermediate + # certificates that were provided in $TEMPDIR/intermediatecerts.pem + savedir="$PWD"; cd $TEMPDIR + # https://backreference.org/2010/05/09/ocsp-verification-with-openssl/ + if [[ "$version" == ssl2 ]]; then + awk -v n=-1 '/Server certificate/ {start=1} + /-----BEGIN CERTIFICATE-----/{ if (start) {inc=1; n++} } + inc { print > ("level" n ".crt") } + /---END CERTIFICATE-----/{ inc=0 }' $TMPFILE + else + awk -v n=-1 '/Certificate chain/ {start=1} + /-----BEGIN CERTIFICATE-----/{ if (start) {inc=1; n++} } + inc { print > ("level" n ".crt") } + /---END CERTIFICATE-----/{ inc=0 }' $TMPFILE + fi + [[ -s level0.crt ]] && nrsaved=$(count_words "$(echo level?.crt 2>/dev/null)") + if [[ $nrsaved -eq 0 ]]; then + success=1 + else + success=0 + CERTIFICATE_LIST_ORDERING_PROBLEM=false + mv level0.crt $HOSTCERT + if [[ $nrsaved -eq 1 ]]; then + echo "" > $TEMPDIR/intermediatecerts.pem + else + cat level?.crt > $TEMPDIR/intermediatecerts.pem + issuerDN="$($OPENSSL x509 -in $HOSTCERT -noout -issuer 2>/dev/null)" + issuerDN="${issuerDN:8}" + previssuerDN="$issuerDN" + # The second certificate (level1.crt) SHOULD be issued to the CA + # that issued the server's certificate. But, according to RFC 8446 + # clients SHOULD be prepared to handle cases in which the server + # does not order the certificates correctly. 
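+ # The loop below compares each level<i>.crt subject DN against the issuer DN of the server certificate:
+ # the first match is copied to $TEMPDIR/hostcert_issuer.pem, and any level whose subject DN does not
+ # match the previous certificate's issuer DN sets CERTIFICATE_LIST_ORDERING_PROBLEM=true.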
+ for (( i=1; i < nrsaved; i++ )); do + CAsubjectDN="$($OPENSSL x509 -in "level$i.crt" -noout -subject 2>/dev/null)" + if [[ "${CAsubjectDN:9}" == "$issuerDN" ]]; then + cp "level$i.crt" $TEMPDIR/hostcert_issuer.pem + issuerDN="" # set to empty to prevent further matches + fi + [[ "${CAsubjectDN:9}" != "$previssuerDN" ]] && CERTIFICATE_LIST_ORDERING_PROBLEM=true + "$CERTIFICATE_LIST_ORDERING_PROBLEM" && [[ -z "$issuerDN" ]] && break + previssuerDN="$($OPENSSL x509 -in "level$i.crt" -noout -issuer 2>/dev/null)" + previssuerDN="${previssuerDN:8}" + done + # This should never happen, but if more than one certificate was + # provided and none of them belong to the CA that issued the + # server's certificate, then the extra certificates should just + # be deleted. There is code elsewhere that assumes that if + # $TEMPDIR/intermediatecerts.pem is non-empty, then + # $TEMPDIR/hostcert_issuer.pem is also present. + [[ -n "$issuerDN" ]] && echo "" > $TEMPDIR/intermediatecerts.pem + rm level?.crt + fi + fi + cd "$savedir" + return $success +} + +extract_stapled_ocsp() { + local response="$(cat $TMPFILE)" + local ocsp tmp + local -i ocsp_len + + STAPLED_OCSP_RESPONSE="" + if [[ "$response" =~ CertificateStatus ]]; then + # This is OpenSSL 1.1.0 or 1.1.1 and the response + # is TLS 1.2 or earlier. + ocsp="${response##*CertificateStatus}" + ocsp="16${ocsp#*16}" + ocsp="${ocsp%%<<<*}" + ocsp="$(strip_spaces "$(newline_to_spaces "$ocsp")")" + ocsp="${ocsp:8}" + elif [[ "$response" =~ "TLS server extension \"status request\" (id=5), len=0" ]]; then + # This is not OpenSSL 1.1.0 or 1.1.1, and the response + # is TLS 1.2 or earlier. + ocsp="${response%%OCSP response:*}" + ocsp="${ocsp##*<<<}" + ocsp="16${ocsp#*16}" + ocsp="$(strip_spaces "$(newline_to_spaces "$ocsp")")" + ocsp="${ocsp:8}" + elif [[ "$response" =~ "TLS server extension \"status request\" (id=5), len=" ]]; then + # This is OpenSSL 1.1.1 and the response is TLS 1.3. + ocsp="${response##*TLS server extension \"status request\" (id=5), len=}" + ocsp="${ocsp%%<<<*}" + tmp="${ocsp%%[!0-9]*}" + ocsp="${ocsp#$tmp}" + ocsp_len=2*$tmp + ocsp="$(awk ' { print $3 $4 $5 $6 $7 $8 $9 $10 $11 $12 $13 $14 $15 $16 $17 } ' <<< "$ocsp" | sed 's/-//')" + ocsp="$(strip_spaces "$(newline_to_spaces "$ocsp")")" + ocsp="${ocsp:0:ocsp_len}" + else + return 0 + fi + # Determine whether this is a single OCSP response or a sequence of + # responses and then extract just the response for the server's + # certificate. 
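+ # CertificateStatus status_type: 0x01 = ocsp (a single OCSPResponse, RFC 6066),
+ # 0x02 = ocsp_multi (an OCSPResponseList, RFC 6961); in the latter case the code keeps
+ # just the first response in the list.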
+ if [[ "${ocsp:0:2}" == "01" ]]; then + STAPLED_OCSP_RESPONSE="${ocsp:8}" + elif [[ "${ocsp:0:2}" == "02" ]]; then + ocsp_len=2*$(hex2dec "${tls_certificate_status_ascii:8:6}") + STAPLED_OCSP_RESPONSE="${ocsp:14:ocsp_len}" + fi + return 0 +} + +# arg1 is "" +# arg2 is a list of protocols to try (tls1_2, tls1_1, tls1, ssl3) or empty (if all should be tried) +get_server_certificate() { + local protocols_to_try proto + local success ret + local npn_params="" line + local ciphers_to_test="" + # Cipher suites that use a certifiate with an RSA (signature) public key + local -r a_rsa="cc,13, cc,15, c0,30, c0,28, c0,14, 00,9f, cc,a8, cc,aa, c0,a3, c0,9f, 00,6b, 00,39, c0,77, 00,c4, 00,88, c0,45, c0,4d, c0,53, c0,61, c0,7d, c0,8b, 16,b7, 16,b9, c0,2f, c0,27, c0,13, 00,9e, c0,a2, c0,9e, 00,67, 00,33, c0,76, 00,be, 00,9a, 00,45, c0,44, c0,4c, c0,52, c0,60, c0,7c, c0,8a, c0,11, c0,12, 00,16, 00,15, 00,14, c0,10" + # Cipher suites that use a certifiate with an RSA (encryption) public key + local -r e_rsa="00,b7, c0,99, 00,ad, cc,ae, 00,9d, c0,a1, c0,9d, 00,3d, 00,35, 00,c0, 00,84, 00,95, c0,3d, c0,51, c0,69, c0,6f, c0,7b, c0,93, ff,01, 00,ac, c0,a0, c0,9c, 00,9c, 00,3c, 00,2f, 00,ba, 00,b6, 00,96, 00,41, c0,98, 00,07, 00,94, c0,3c, c0,50, c0,68, c0,6e, c0,7a, c0,92, 00,05, 00,04, 00,92, 00,0a, 00,93, fe,ff, ff,e0, 00,62, 00,09, 00,61, fe,fe, ff,e1, 00,64, 00,60, 00,08, 00,06, 00,03, 00,b9, 00,b8, 00,2e, 00,3b, 00,02, 00,01, ff,00" + # Cipher suites that use a certifiate with a DSA public key + local -r a_dss="00,a3, 00,6a, 00,38, 00,c3, 00,87, c0,43, c0,57, c0,81, 00,a2, 00,40, 00,32, 00,bd, 00,99, 00,44, c0,42, c0,56, c0,80, 00,66, 00,13, 00,63, 00,12, 00,65, 00,11" + # Cipher suites that use a certifiate with a DH public key + local -r a_dh="00,a5, 00,a1, 00,69, 00,68, 00,37, 00,36, 00,c2, 00,c1, 00,86, 00,85, c0,3f, c0,41, c0,55, c0,59, c0,7f, c0,83, 00,a4, 00,a0, 00,3f, 00,3e, 00,31, 00,30, 00,bc, 00,bb, 00,98, 00,97, 00,43, 00,42, c0,3e, c0,40, c0,54, c0,58, c0,7e, c0,82, 00,10, 00,0d, 00,0f, 00,0c, 00,0b, 00,0e" + # Cipher suites that use a certifiate with an ECDH public key + local -r a_ecdh="c0,32, c0,2e, c0,2a, c0,26, c0,0f, c0,05, c0,79, c0,75, c0,4b, c0,4f, c0,5f, c0,63, c0,89, c0,8d, c0,31, c0,2d, c0,29, c0,25, c0,0e, c0,04, c0,78, c0,74, c0,4a, c0,4e, c0,5e, c0,62, c0,88, c0,8c, c0,0c, c0,02, c0,0d, c0,03, c0,0b, c0,01" + # Cipher suites that use a certifiate with an ECDSA public key + local -r a_ecdsa="cc,14, c0,2c, c0,24, c0,0a, cc,a9, c0,af, c0,ad, c0,73, c0,49, c0,5d, c0,87, 16,b8, 16,ba, c0,2b, c0,23, c0,09, c0,ae, c0,ac, c0,72, c0,48, c0,5c, c0,86, c0,07, c0,08, c0,06" + # Cipher suites that use a certifiate with a GOST public key + local -r a_gost="00,80, 00,81, 00,82, 00,83" + local using_sockets=true + + "$SSL_NATIVE" && using_sockets=false + + CERTIFICATE_LIST_ORDERING_PROBLEM=false + if [[ "$1" =~ "tls1_3" ]]; then + [[ $(has_server_protocol "tls1_3") -eq 1 ]] && return 1 + if "$HAS_TLS13"; then + if [[ "$1" =~ "tls1_3_RSA" ]]; then + $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -showcerts -connect $NODEIP:$PORT $PROXY $SNI -tls1_3 -tlsextdebug -status -msg -sigalgs PSS+SHA256:PSS+SHA384") $ERRFILE >$TMPFILE + elif [[ "$1" =~ "tls1_3_ECDSA" ]]; then + $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -showcerts -connect $NODEIP:$PORT $PROXY $SNI -tls1_3 -tlsextdebug -status -msg -sigalgs ECDSA+SHA256:ECDSA+SHA384") $ERRFILE >$TMPFILE + else + return 1 + fi + sclient_connect_successful $? 
$TMPFILE || return 1 + DETECTED_TLS_VERSION="0304" + extract_certificates "tls1_3" + extract_stapled_ocsp + success=$? + else + # For STARTTLS protocols not being implemented yet via sockets this is a bypass, otherwise it won't be usable at all (e.g. LDAP) + if ( [[ "$STARTTLS" =~ ldap ]] || [[ "$STARTTLS" =~ irc ]] ); then + return 1 + elif [[ "$1" =~ "tls1_3_RSA" ]]; then + tls_sockets "04" "$TLS13_CIPHER" "all" "00,12,00,00, 00,05,00,05,01,00,00,00,00, 00,0d,00,10,00,0e,08,04,08,05,08,06,04,01,05,01,06,01,02,01" + elif [[ "$1" =~ "tls1_3_ECDSA" ]]; then + tls_sockets "04" "$TLS13_CIPHER" "all" "00,12,00,00, 00,05,00,05,01,00,00,00,00, 00,0d,00,0a,00,08,04,03,05,03,06,03,02,03" + else + return 1 + fi + success=$? + [[ $success -eq 0 ]] || return 1 + cp "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" $TMPFILE + fi + [[ $success -eq 0 ]] && add_tls_offered tls1_3 yes + extract_new_tls_extensions $TMPFILE + tmpfile_handle ${FUNCNAME[0]}.txt + return $success + fi + + "$HAS_NPN" && [[ -z "$STARTTLS" ]] && npn_params="-nextprotoneg \"$NPN_PROTOs\"" + + if [[ -n "$2" ]]; then + protocols_to_try="$2" + else + protocols_to_try="tls1_2 tls1_1 tls1 ssl3" + fi + + # first, throw every cipher/protocol at the server to see what works + success=7 + + if [[ "$OPTIMAL_PROTO" == -ssl2 ]]; then + success=1 + sslv2_sockets "" "true" + if [[ $? -eq 3 ]]; then + mv $TEMPDIR/$NODEIP.parse_sslv2_serverhello.txt $TMPFILE + success=0 + fi + tmpfile_handle ${FUNCNAME[0]}.txt + return $success + fi + + if "$using_sockets"; then + protocols_to_try="${protocols_to_try/tls1_2/03}" + protocols_to_try="${protocols_to_try/tls1_1/02}" + protocols_to_try="${protocols_to_try/tls1/01}" + protocols_to_try="${protocols_to_try/ssl3/00}" + + [[ "$1" =~ aRSA ]] && ciphers_to_test+=", $a_rsa" + [[ "$1" =~ eRSA ]] && ciphers_to_test+=", $e_rsa" + [[ "$1" =~ aDSS ]] && ciphers_to_test+=", $a_dss" + [[ "$1" =~ aDH ]] && ciphers_to_test+=", $a_dh" + [[ "$1" =~ aECDH ]] && ciphers_to_test+=", $a_ecdh" + [[ "$1" =~ aECDSA ]] && ciphers_to_test+=", $a_ecdsa" + [[ "$1" =~ aGOST ]] && ciphers_to_test+=", $a_gost" + + [[ -z "$ciphers_to_test" ]] && return 1 + ciphers_to_test="${ciphers_to_test:2}" + + for proto in $protocols_to_try; do + [[ 1 -eq $(has_server_protocol $proto) ]] && continue + tls_sockets "$proto" "$ciphers_to_test, 00,ff" "all" "00,12,00,00, 00,05,00,05,01,00,00,00,00" + ret=$? + [[ $ret -eq 0 ]] && success=0 && break + [[ $ret -eq 2 ]] && success=0 && break + done # this loop is needed for IIS6 and others which have handshake size limitations + if [[ $success -eq 7 ]]; then + # "-status" above doesn't work for GOST-only servers, so we do another test without it and see whether that works then: + tls_sockets "$proto" "$ciphers_to_test, 00,ff" "all" "00,12,00,00" + ret=$? + [[ $ret -eq 0 ]] && success=0 + [[ $ret -eq 2 ]] && success=0 + if [[ $success -eq 7 ]]; then + if [ -z "$1" ]; then + prln_warning "Strange, no SSL/TLS protocol seems to be supported (error around line $((LINENO - 6)))" + fi + tmpfile_handle ${FUNCNAME[0]}.txt + return 7 # this is ugly, I know + else + GOST_STATUS_PROBLEM=true + fi + fi + cp $TEMPDIR/$NODEIP.parse_tls_serverhello.txt $TMPFILE + + # When "$2" is empty, get_server_certificate() is being called with SNI="". + # In case the extensions returned by the server differ depending on whether + # SNI is provided or not, don't collect extensions when SNI="" (unless + # no DNS name was provided at the command line).
+ [[ -z "$2" ]] && extract_new_tls_extensions $TMPFILE + else + # no sockets, openssl + ciphers_to_test="$1" + if [[ "$1" =~ aRSA ]] && [[ "$1" =~ eRSA ]]; then + ciphers_to_test="${ciphers_to_test/eRSA/}" + elif [[ "$1" =~ aRSA ]]; then + ciphers_to_test="${ciphers_to_test/aRSA/}" + for ciph in $(colon_to_spaces $(actually_supported_osslciphers "aRSA")); do + [[ "$ciph" =~ -RSA- ]] && ciphers_to_test+=":$ciph" + done + elif [[ "$1" =~ eRSA ]]; then + ciphers_to_test="${ciphers_to_test/eRSA/}" + for ciph in $(colon_to_spaces $(actually_supported_osslciphers "aRSA")); do + [[ ! "$ciph" =~ -RSA- ]] && ciphers_to_test+=":$ciph" + done + fi + ciphers_to_test="${ciphers_to_test/::/:}" + [[ "${ciphers_to_test:0:1}" == : ]] && ciphers_to_test="${ciphers_to_test:1}" + [[ $(count_ciphers $(actually_supported_osslciphers "$ciphers_to_test")) -ge 1 ]] || return 1 + + for proto in $protocols_to_try; do + [[ 1 -eq $(has_server_protocol $proto) ]] && continue + [[ "$proto" == ssl3 ]] && ! "$HAS_SSL3" && continue + addcmd="" + $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -cipher $ciphers_to_test -showcerts -connect $NODEIP:$PORT $PROXY $SNI -$proto -tlsextdebug $npn_params -status -msg") $ERRFILE >$TMPFILE + if sclient_connect_successful $? $TMPFILE; then + success=0 + break # now we have the certificate + fi + done # this loop is needed for IIS6 and others which have a handshake size limitations + if [[ $success -eq 7 ]]; then + # "-status" above doesn't work for GOST only servers, so we do another test without it and see whether that works then: + [[ "$proto" == ssl3 ]] && ! "$HAS_SSL3" && return 7 + $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -cipher $ciphers_to_test -showcerts -connect $NODEIP:$PORT $PROXY $SNI -$proto -tlsextdebug") >$ERRFILE >$TMPFILE + if ! sclient_connect_successful $? $TMPFILE; then + if [ -z "$1" ]; then + prln_warning "Strange, no SSL/TLS protocol seems to be supported (error around line $((LINENO - 6)))" + fi + tmpfile_handle ${FUNCNAME[0]}.txt + return 7 # this is ugly, I know + else + GOST_STATUS_PROBLEM=true + fi + fi + case "$proto" in + "tls1_2") DETECTED_TLS_VERSION="0303" ;; + "tls1_1") DETECTED_TLS_VERSION="0302" ;; + "tls1") DETECTED_TLS_VERSION="0301" ;; + "ssl3") DETECTED_TLS_VERSION="0300" ;; + esac + # When "$2" is empty, get_server_certificate() is being called with SNI="". + # In case the extensions returned by the server differ depending on wheter + # SNI is provided or not, don't collect extensions when SNI="" (unless + # no DNS name was provided at the command line). + [[ -z "$2" ]] && extract_new_tls_extensions $TMPFILE + + extract_certificates "$proto" + extract_stapled_ocsp + success=$? + fi + tmpfile_handle ${FUNCNAME[0]}.txt + return $success +} + +# arg1: path to certificate +# returns CN +get_cn_from_cert() { + local subject + + # attention! openssl 1.0.2 doesn't properly handle online output from certificates from trustwave.com/github.com + #FIXME: use -nameopt oid for robustness + + # for e.g. russian sites -esc_msb,utf8 works in an UTF8 terminal -- any way to check platform independent? + # see x509(1ssl): + subject="$($OPENSSL x509 -in $1 -noout -subject -nameopt multiline,-align,sname,-esc_msb,utf8,-space_eq 2>>$ERRFILE)" + echo "$(awk -F'=' '/CN=/ { print $2 }' <<< "$subject" | tr '\n' ' ')" + return $? 
+} + +# Return 0 if the name provided in arg1 is a wildcard name +is_wildcard() +{ + local certname="$1" + + # If the first label in the DNS name begins "xn--", then assume it is an + # A-label and not a wildcard name (RFC 6125, Section 6.4.3). + [[ "${certname:0:4}" == "xn--" ]] && return 1 + + # Remove part of name preceding '*' or '.'. If no "*" appears in the + # left-most label, then it is not a wildcard name (RFC 6125, Section 6.4.3). + basename="$(echo -n "$certname" | sed 's/^[_a-zA-Z0-9\-]*//')" + [[ "${basename:0:1}" != "*" ]] && return 1 # not a wildcard name + + # Check that there are no additional wildcard ('*') characters or any + # other characters that do not belong in a DNS name. + [[ -n $(echo -n "${basename:1}" | sed 's/^[_\.a-zA-Z0-9\-]*//') ]] && return 1 + return 0 +} + +# Return 0 if the name provided in arg2 is a wildcard name and it matches the name provided in arg1. +wildcard_match() +{ + local servername="$1" + local certname="$2" + local basename + local -i basename_offset len_certname len_part1 len_basename + local -i len_servername len_wildcard + + len_servername=${#servername} + len_certname=${#certname} + + # Use rules from RFC 6125 to perform the match. + + # Assume the "*" in the wildcard needs to be replaced by one or more + # characters, although RFC 6125 is not clear about that. + [[ $len_servername -lt $len_certname ]] && return 1 + + is_wildcard "$certname" + [[ $? -ne 0 ]] && return 1 + + # Comparisons of DNS names are case insensitive, so convert both names to uppercase. + certname="$(toupper "$certname")" + servername="$(toupper "$servername")" + + # Extract part of name that comes after the "*" + basename="$(echo -n "$certname" | sed 's/^[_A-Z0-9\-]*\*//')" + len_basename=${#basename} + len_part1=$len_certname-$len_basename-1 + len_wildcard=$len_servername-$len_certname+1 + basename_offset=$len_servername-$len_basename + + # Check that initial part of $servername matches initial part of $certname + # and that final part of $servername matches final part of $certname. + [[ "${servername:0:len_part1}" != "${certname:0:len_part1}" ]] && return 1 + [[ "${servername:basename_offset:len_basename}" != "$basename" ]] && return 1 + + # Check that part of $servername that matches "*" is all part of a single + # domain label. 
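+ # Worked example with hypothetical names -- servername WWW.EXAMPLE.COM vs. certname *.EXAMPLE.COM:
+ # basename=".EXAMPLE.COM", len_part1=0, len_wildcard=3, basename_offset=3, so all that is left to
+ # verify below is that "WWW", the part covered by "*", is a single label without a further "." in it.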
+ [[ -n $(echo -n "${servername:len_part1:len_wildcard}" | sed 's/^[_A-Z0-9\-]*//') ]] && return 1 + + return 0 +} + +# Compare the server name provided in arg1 to the CN and SAN in arg2 and return: +# 0, if server name provided does not match any of the names in the CN or SAN +# 1, if the server name provided matches a name in the SAN +# 2, if the server name provided is a wildcard match against a name in the SAN +# 4, if the server name provided matches the CN +# 5, if the server name provided matches the CN AND a name in the SAN +# 6, if the server name provided matches the CN AND is a wildcard match against a name in the SAN +# 8, if the server name provided is a wildcard match against the CN +# 9, if the server name provided matches a name in the SAN AND is a wildcard match against the CN +# 10, if the server name provided is a wildcard match against the CN AND a name in the SAN + +compare_server_name_to_cert() { + local cert="$1" + local servername cn dns_sans ip_sans san dercert tag + local srv_id="" xmppaddr="" + local -i i len len1 + local -i subret=0 # no error condition, passing results + + HAS_DNS_SANS=false + if [[ -n "$XMPP_HOST" ]]; then + # RFC 6120, Section 13.7.2.1, states that for XMPP the identity that + # should appear in the server's certificate is identity that appears + # in the the 'to' address that the client communicates in the initial + # stream header. + servername="$(toupper "$XMPP_HOST")" + else + servername="$(toupper "$NODE")" + fi + + # Check whether any of the DNS names in the certificate match the servername + dns_sans="$(get_san_dns_from_cert "$cert")" + while read san; do + if [[ -n "$san" ]]; then + HAS_DNS_SANS=true + [[ $(toupper "$san") == "$servername" ]] && subret=1 && break + fi + done <<< "$dns_sans" + + if [[ $subret -eq 0 ]]; then + # Check whether any of the IP addresses in the certificate match the servername + ip_sans=$($OPENSSL x509 -in "$cert" -noout -text 2>>$ERRFILE | grep -A2 "Subject Alternative Name" | \ + tr ',' '\n' | grep "IP Address:" | sed -e 's/IP Address://g' -e 's/ //g') + while read san; do + [[ -n "$san" ]] && [[ "$san" == "$servername" ]] && subret=1 && break + done <<< "$ip_sans" + fi + + if [[ $subret -eq 0 ]] && [[ -n "$XMPP_HOST" ]]; then + # For XMPP hosts, in addition to checking for a matching DNS name, + # should also check for a matching SRV-ID or XmppAddr identifier. + dercert="$($OPENSSL x509 -in "$cert" -outform DER 2>>$ERRFILE | hexdump -v -e '16/1 "%02X"')" + # Look for the beginning of the subjectAltName extension. It + # will begin with the OID (2.5.29.17 = 0603551D11). After the OID + # there may be an indication that the extension is critical (0101FF). + # Finally will be the tag indicating that the value of the extension is + # encoded as an OCTET STRING (04). + if [[ "$dercert" =~ 0603551D110101FF04 ]]; then + dercert="${dercert##*0603551D110101FF04}" + else + dercert="${dercert##*0603551D1104}" + fi + # Skip over the encoding of the length of the OCTET STRING. + if [[ "${dercert:0:1}" == "8" ]]; then + i="${dercert:1:1}" + i=2*$i+2 + dercert="${dercert:i}" + else + dercert="${dercert:2}" + fi + # Next byte should be a 30 (SEQUENCE). + if [[ "${dercert:0:2}" == "30" ]]; then + # Get the length of the subjectAltName extension and then skip + # over the encoding of the length. 
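+ # DER length refresher (illustrative bytes, not from a real certificate): in the short form one byte
+ # < 0x80 is the length itself, e.g. "30 16" = SEQUENCE of 0x16 bytes; in the long form the first byte
+ # is 0x80 plus the number of length bytes that follow, e.g. "30 82 01 23" = SEQUENCE of 0x0123 bytes.
+ # The test on the leading "8" nibble below distinguishes the long form from the short one.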
+ if [[ "${dercert:2:1}" == "8" ]]; then + case "${dercert:3:1}" in + 1) len=2*0x${dercert:4:2}; dercert="${dercert:6}" ;; + 2) len=2*0x${dercert:4:4}; dercert="${dercert:8}" ;; + 3) len=2*0x${dercert:4:6}; dercert="${dercert:10}" ;; + *) len=0 ;; + esac + else + len=2*0x${dercert:2:2} + dercert="${dercert:4}" + fi + if [[ $len -ne 0 ]] && [[ $len -lt ${#dercert} ]]; then + # loop through all the names and extract the SRV-ID and XmppAddr identifiers + for (( i=0; i < len; i=i+len_name )); do + tag="${dercert:i:2}" + i+=2 + if [[ "${dercert:i:1}" == "8" ]]; then + i+=1 + case "${dercert:i:1}" in + 1) i+=1; len_name=2*0x${dercert:i:2}; i+=2 ;; + 2) i+=1; len_name=2*0x${dercert:i:4}; i+=4 ;; + 3) i+=1; len_name=2*0x${dercert:i:6}; i+=4 ;; + *) len=0 ;; + esac + else + len_name=2*0x${dercert:i:2} + i+=2 + fi + if [[ "$tag" == "A0" ]]; then + # This is an otherName. + if [[ $len_name -gt 18 ]] && ( [[ "${dercert:i:20}" == "06082B06010505070805" ]] || \ + [[ "${dercert:i:20}" == "06082B06010505070807" ]] ); then + # According to the OID, this is either an SRV-ID or XmppAddr. + j=$i+20 + if [[ "${dercert:j:2}" == "A0" ]]; then + j+=2 + if [[ "${dercert:j:1}" == "8" ]]; then + j+=1 + j+=2*0x${dercert:j:1}+1 + else + j+=2 + fi + if ( [[ "${dercert:i:20}" == "06082B06010505070805" ]] && [[ "${dercert:j:2}" == "0C" ]] ) || \ + ( [[ "${dercert:i:20}" == "06082B06010505070807" ]] && [[ "${dercert:j:2}" == "16" ]] ); then + # XmppAddr should be encoded as UTF8STRING (0C) and + # SRV-ID should be encoded IA5STRING (16). + j+=2 + if [[ "${dercert:j:1}" == "8" ]]; then + j+=1 + case "${dercert:j:1}" in + 1) j+=1; len1=2*0x${dercert:j:2}; j+=2 ;; + 2) j+=1; len1=2*0x${dercert:j:4}; j+=4 ;; + 3) j+=1; len1=2*0x${dercert:j:6}; j+=6 ;; + 4) len1=0 ;; + esac + else + len1=2*0x${dercert:j:2} + j+=2 + fi + if [[ $len1 -ne 0 ]]; then + san="$(asciihex_to_binary "${dercert:j:len1}")" + if [[ "${dercert:i:20}" == "06082B06010505070805" ]]; then + xmppaddr+="$san " + else + srv_id+="$san " + fi + fi + fi + fi + fi + fi + done + fi + fi + [[ -n "$srv_id" ]] && HAS_DNS_SANS=true + [[ -n "$xmppaddr" ]] && HAS_DNS_SANS=true + while read -d " " san; do + [[ -n "$san" ]] && [[ $(toupper "$san") == "_XMPP-SERVER.$servername" ]] && subret=1 && break + done <<< "$srv_id" + if [[ $subret -eq 0 ]]; then + while read -d " " san; do + [[ -n "$san" ]] && [[ $(toupper "$san") == "$servername" ]] && subret=1 && break + done <<< "$xmppaddr" + fi + fi + + # Check whether any of the DNS names in the certificate are wildcard names + # that match the servername + if [[ $subret -eq 0 ]]; then + while read san; do + [[ -n "$san" ]] || continue + wildcard_match "$servername" "$san" + [[ $? -eq 0 ]] && subret=2 && break + done <<< "$dns_sans" + fi + + cn="$(get_cn_from_cert "$cert")" + + # If the CN contains any characters that are not valid for a DNS name, + # then assume it does not contain a DNS name. + [[ -n $(sed 's/^[_\.a-zA-Z0-9*\-]*//' <<< "$cn") ]] && return $subret + + # Check whether the CN in the certificate matches the servername + [[ $(toupper "$cn") == "$servername" ]] && subret+=4 && return $subret + + # Check whether the CN in the certificate is a wildcard name that matches + # the servername + wildcard_match "$servername" "$cn" + [[ $? -eq 0 ]] && subret+=8 + return $subret +} + +# This function determines whether the certificate (arg3) contains "visibility +# information" (see Section 4.3.3 of +# https://www.etsi.org/deliver/etsi_ts/103500_103599/10352303/01.01.01_60/ts_10352303v010101p.pdf . 
+etsi_etls_visibility_info() { + local jsonID="$1" + local spaces="$2" + local cert="$3" + local cert_txt="$4" + local dercert tag + local -a fingerprint=() access_description=() + local -i i j len len1 len_name nr_visnames=0 + + # If "visibility information" is present, it will appear in the subjectAltName + # extension (0603551D11) as an otherName with OID 0.4.0.3523.3.1 (060604009B430301). + # OpenSSL displays all names of type otherName as "othername:". + # As certificates will rarely include a name encoded as an otherName, check the + # text version of the certificate for "othername:" before calling + # external functions to obtain the DER encoded certficate. + if [[ "$cert_txt" =~ X509v3\ Subject\ Alternative\ Name:.*othername:\ ]]; then + dercert="$($OPENSSL x509 -in "$cert" -outform DER 2>>$ERRFILE | hexdump -v -e '16/1 "%02X"')" + if [[ "$dercert" =~ 0603551D110101FF04[0-9A-F]*060604009B430301 ]] || \ + [[ "$dercert" =~ 0603551D1104[0-9A-F]*060604009B430301 ]]; then + # Look for the beginning of the subjectAltName extension. It + # will begin with the OID (2.5.29.17 = 0603551D11). After the OID + # there may be an indication that the extension is critical (0101FF). + # Finally will be the tag indicating that the value of the extension is + # encoded as an OCTET STRING (04). + if [[ "$dercert" =~ 0603551D110101FF04 ]]; then + dercert="${dercert##*0603551D110101FF04}" + else + dercert="${dercert##*0603551D1104}" + fi + # Skip over the encoding of the length of the OCTET STRING. + if [[ "${dercert:0:1}" == 8 ]]; then + i="${dercert:1:1}" + i=2*$i+2 + dercert="${dercert:i}" + else + dercert="${dercert:2}" + fi + # Next byte should be a 30 (SEQUENCE). + if [[ "${dercert:0:2}" == 30 ]]; then + # Get the length of the subjectAltName extension and then skip + # over the encoding of the length. + if [[ "${dercert:2:1}" == 8 ]]; then + case "${dercert:3:1}" in + 1) len=2*0x${dercert:4:2}; dercert="${dercert:6}" ;; + 2) len=2*0x${dercert:4:4}; dercert="${dercert:8}" ;; + 3) len=2*0x${dercert:4:6}; dercert="${dercert:10}" ;; + *) len=0 ;; + esac + else + len=2*0x${dercert:2:2} + dercert="${dercert:4}" + fi + if [[ $len -ne 0 ]] && [[ $len -lt ${#dercert} ]]; then + # loop through all the names and extract the visibility information + for (( i=0; i < len; i=i+len_name )); do + tag="${dercert:i:2}" + i+=2 + if [[ "${dercert:i:1}" == 8 ]]; then + i+=1 + case "${dercert:i:1}" in + 1) i+=1; len_name=2*0x${dercert:i:2}; i+=2 ;; + 2) i+=1; len_name=2*0x${dercert:i:4}; i+=4 ;; + 3) i+=1; len_name=2*0x${dercert:i:6}; i+=4 ;; + *) len=0 ;; + esac + else + len_name=2*0x${dercert:i:2} + i+=2 + fi + [[ "$tag" == A0 ]] || continue + # This is an otherName. + [[ $len_name -gt 16 ]] || continue + [[ "${dercert:i:16}" == 060604009B430301 ]] || continue + # According to the OID, this is visibility information. + j=$i+16 + # Skip over the tag (A0) and length for the otherName value. 
+ [[ "${dercert:j:2}" == A0 ]] || continue + j+=2 + if [[ "${dercert:j:1}" == 8 ]]; then + j+=1 + j+=2*0x${dercert:j:1}+1 + else + j+=2 + fi + # The value for this otherName is encoded as a SEQUENCE (30): + # VisibilityInformation ::= SEQUENCE { + # fingerprint OCTET STRING (SIZE(10)), + # accessDescription UTF8String } + [[ "${dercert:j:2}" == 30 ]] || continue + j+=2 + if [[ "${dercert:j:1}" == 8 ]]; then + j+=1 + case "${dercert:j:1}" in + 1) j+=1; len1=2*0x${dercert:j:2}; j+=2 ;; + 2) j+=1; len1=2*0x${dercert:j:4}; j+=4 ;; + 3) j+=1; len1=2*0x${dercert:j:6}; j+=6 ;; + 4) len1=0 ;; + esac + else + len1=2*0x${dercert:j:2} + j+=2 + fi + [[ $len1 -ne 0 ]] || continue + # Next is the 10-byte fingerprint, encoded as an OCTET STRING (04) + [[ "${dercert:j:4}" == 040A ]] || continue + j+=4 + fingerprint[nr_visnames]="$(asciihex_to_binary "${dercert:j:20}")" + j+=20 + # Finally comes the access description, encoded as a UTF8String (0C). + [[ "${dercert:j:2}" == 0C ]] || continue + j+=2 + if [[ "${dercert:j:1}" == "8" ]]; then + j+=1 + case "${dercert:j:1}" in + 1) j+=1; len1=2*0x${dercert:j:2}; j+=2 ;; + 2) j+=1; len1=2*0x${dercert:j:4}; j+=4 ;; + 3) j+=1; len1=2*0x${dercert:j:6}; j+=6 ;; + 4) len1=0 ;; + esac + else + len1=2*0x${dercert:j:2} + j+=2 + fi + access_description[nr_visnames]=""$(asciihex_to_binary "${dercert:j:len1}")"" + nr_visnames+=1 + done + fi + fi + fi + fi + if [[ $nr_visnames -eq 0 ]]; then + outln "not present" + fileout "$jsonID" "INFO" "not present" + else + for (( i=0; i < nr_visnames; i++ )); do + [[ $i -ne 0 ]] && out "$spaces" + outln "$(out_row_aligned_max_width "${fingerprint[i]} / ${access_description[i]}" "$spaces" $TERM_WIDTH)" + fileout "$jsonID" "INFO" "${fingerprint[i]} / ${access_description[i]}" + done + fi + return 0 +} + +# NOTE: arg3 must contain the text output of $HOSTCERT. +must_staple() { + local jsonID="cert_mustStapleExtension" + local json_postfix="$1" + local provides_stapling="$2" + local hostcert_txt="$3" + local cert extn + local -i extn_len + local supported=false + + # Note this function is only looking for status_request (5) and not + # status_request_v2 (17), since OpenSSL seems to only include status_request (5) + # in its ClientHello when the "-status" option is used. + + # OpenSSL 1.1.0 supports pretty-printing the "TLS Feature extension." For any + # previous versions of OpenSSL, OpenSSL can only show if the extension OID is present. + if grep -A 1 "TLS Feature:" <<< "$hostcert_txt" | grep -q "status_request"; then + # FIXME: This will indicate that must staple is supported if the + # certificate indicates status_request or status_request_v2. This is + # probably okay, since it seems likely that any TLS Feature extension + # that includes status_request_v2 will also include status_request. + supported=true + elif [[ "$hostcert_txt" =~ '1.3.6.1.5.5.7.1.24:' ]]; then + cert="$($OPENSSL x509 -in "$HOSTCERT" -outform DER 2>>$ERRFILE | hexdump -v -e '16/1 "%02X"')" + extn="${cert##*06082B06010505070118}" + # Check for critical bit, and skip over it if present. + [[ "${extn:0:6}" == "0101FF" ]] && extn="${extn:6}" + # Next is tag and length of extnValue OCTET STRING. Assume it is less than 128 bytes. + extn="${extn:4}" + # The TLS Feature is a SEQUENCE of INTEGER. Get the length of the SEQUENCE + extn_len=2*$(hex2dec "${extn:2:2}") + # If the extension include the status_request (5), then it supports must staple. 
+ if [[ "${extn:4:extn_len}" =~ 020105 ]]; then + supported=true + fi + fi + + if "$supported"; then + if "$provides_stapling"; then + prln_svrty_good "supported" + fileout "${jsonID}${json_postfix}" "OK" "supported" + else + prln_svrty_high "requires OCSP stapling (NOT ok)" + fileout "${jsonID}${json_postfix}" "HIGH" "extension detected but no OCSP stapling provided" + fi + else + outln "--" + fileout "${jsonID}${json_postfix}" "INFO" "--" + fi + return 0 +} + +# TODO: This function checks for Certificate Transparency support based on RFC 6962. +# It will need to be updated to add checks for Certificate Transparency support based on 6962bis. +# return values are results, no error conditions +certificate_transparency() { + local cert_txt="$1" + local ocsp_response="$2" + local -i number_of_certificates=$3 + local cipher="$4" + local sni_used="$5" + local tls_version="$6" + local sni="" + local ciphers="" + local hexc n ciph sslver kx auth enc mac export + local extra_extns="" + local -i success + # Cipher suites that use a certifiate with an RSA (signature) public key + local -r a_rsa="cc,13, cc,15, c0,30, c0,28, c0,14, 00,9f, cc,a8, cc,aa, c0,a3, c0,9f, 00,6b, 00,39, c0,77, 00,c4, 00,88, c0,45, c0,4d, c0,53, c0,61, c0,7d, c0,8b, 16,b7, 16,b9, c0,2f, c0,27, c0,13, 00,9e, c0,a2, c0,9e, 00,67, 00,33, c0,76, 00,be, 00,9a, 00,45, c0,44, c0,4c, c0,52, c0,60, c0,7c, c0,8a, c0,11, c0,12, 00,16, 00,15, 00,14, c0,10" + # Cipher suites that use a certifiate with an RSA (encryption) public key + local -r e_rsa="00,b7, c0,99, 00,ad, cc,ae, 00,9d, c0,a1, c0,9d, 00,3d, 00,35, 00,c0, 00,84, 00,95, c0,3d, c0,51, c0,69, c0,6f, c0,7b, c0,93, ff,01, 00,ac, c0,a0, c0,9c, 00,9c, 00,3c, 00,2f, 00,ba, 00,b6, 00,96, 00,41, c0,98, 00,07, 00,94, c0,3c, c0,50, c0,68, c0,6e, c0,7a, c0,92, 00,05, 00,04, 00,92, 00,0a, 00,93, fe,ff, ff,e0, 00,62, 00,09, 00,61, fe,fe, ff,e1, 00,64, 00,60, 00,08, 00,06, 00,03, 00,b9, 00,b8, 00,2e, 00,3b, 00,02, 00,01, ff,00" + # Cipher suites that use a certifiate with a DSA public key + local -r a_dss="00,a3, 00,6a, 00,38, 00,c3, 00,87, c0,43, c0,57, c0,81, 00,a2, 00,40, 00,32, 00,bd, 00,99, 00,44, c0,42, c0,56, c0,80, 00,66, 00,13, 00,63, 00,12, 00,65, 00,11" + # Cipher suites that use a certifiate with a DH public key + local -r a_dh="00,a5, 00,a1, 00,69, 00,68, 00,37, 00,36, 00,c2, 00,c1, 00,86, 00,85, c0,3f, c0,41, c0,55, c0,59, c0,7f, c0,83, 00,a4, 00,a0, 00,3f, 00,3e, 00,31, 00,30, 00,bc, 00,bb, 00,98, 00,97, 00,43, 00,42, c0,3e, c0,40, c0,54, c0,58, c0,7e, c0,82, 00,10, 00,0d, 00,0f, 00,0c, 00,0b, 00,0e" + # Cipher suites that use a certifiate with an ECDH public key + local -r a_ecdh="c0,32, c0,2e, c0,2a, c0,26, c0,0f, c0,05, c0,79, c0,75, c0,4b, c0,4f, c0,5f, c0,63, c0,89, c0,8d, c0,31, c0,2d, c0,29, c0,25, c0,0e, c0,04, c0,78, c0,74, c0,4a, c0,4e, c0,5e, c0,62, c0,88, c0,8c, c0,0c, c0,02, c0,0d, c0,03, c0,0b, c0,01" + # Cipher suites that use a certifiate with an ECDSA public key + local -r a_ecdsa="cc,14, c0,2c, c0,24, c0,0a, cc,a9, c0,af, c0,ad, c0,73, c0,49, c0,5d, c0,87, 16,b8, 16,ba, c0,2b, c0,23, c0,09, c0,ae, c0,ac, c0,72, c0,48, c0,5c, c0,86, c0,07, c0,08, c0,06" + # Cipher suites that use a certifiate with a GOST public key + local -r a_gost="00,80, 00,81, 00,82, 00,83" + + # First check whether signed certificate timestamps (SCT) are included in the + # server's certificate. If they aren't, check whether the server provided + # a stapled OCSP response with SCTs. If no SCTs were found in the certificate + # or OCSP response, check for an SCT TLS extension. 
+ if [[ "$cert_txt" =~ CT\ Precertificate\ SCTs ]] || [[ "$cert_txt" =~ '1.3.6.1.4.1.11129.2.4.2' ]]; then + tm_out "certificate extension" + return 0 + fi + if [[ "$ocsp_response" =~ CT\ Certificate\ SCTs ]] || [[ "$ocsp_response" =~ '1.3.6.1.4.1.11129.2.4.5' ]]; then + tm_out "OCSP extension" + return 0 + fi + + # If the server only has one certificate, then it is sufficient to check whether + # determine_tls_extensions() discovered an SCT TLS extension. If the server has more than + # one certificate, then it is possible that an SCT TLS extension is returned for some + # certificates, but not for all of them. + if [[ $number_of_certificates -eq 1 ]] && [[ "$TLS_EXTENSIONS" =~ signed\ certificate\ timestamps ]]; then + tm_out "TLS extension" + return 0 + fi + + if [[ $number_of_certificates -gt 1 ]] && ! "$SSL_NATIVE"; then + if [[ "$tls_version" == 0304 ]]; then + ciphers=", 13,01, 13,02, 13,03, 13,04, 13,05" + if [[ "$cipher" == tls1_3_RSA ]]; then + extra_extns=", 00,0d,00,10,00,0e,08,04,08,05,08,06,04,01,05,01,06,01,02,01" + elif [[ "$cipher" == tls1_3_ECDSA ]]; then + extra_extns=", 00,0d,00,0a,00,08,04,03,05,03,06,03,02,03" + else + return 1 + fi + else + [[ "$cipher" =~ aRSA ]] && ciphers+=", $a_rsa" + [[ "$cipher" =~ eRSA ]] && ciphers+=", $e_rsa" + [[ "$cipher" =~ aDSS ]] && ciphers+=", $a_dss" + [[ "$cipher" =~ aDH ]] && ciphers+=", $a_dh" + [[ "$cipher" =~ aECDH ]] && ciphers+=", $a_ecdh" + [[ "$cipher" =~ aECDSA ]] && ciphers+=", $a_ecdsa" + [[ "$cipher" =~ aGOST ]] && ciphers+=", $a_gost" + + [[ -z "$ciphers" ]] && return 1 + ciphers+=", 00,ff" + fi + [[ -z "$sni_used" ]] && sni="$SNI" && SNI="" + tls_sockets "${tls_version:2:2}" "${ciphers:2}" "all" "00,12,00,00$extra_extns" + success=$? + [[ -z "$sni_used" ]] && SNI="$sni" + if ( [[ $success -eq 0 ]] || [[ $success -eq 2 ]] ) && \ + grep -a 'TLS server extension ' "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" | \ + grep -aq "signed certificate timestamps"; then + tm_out "TLS extension" + return 0 + fi + fi + + if [[ $SERVICE != HTTP ]] && ! "$CLIENT_AUTH"; then + # At the moment Certificate Transparency only applies to HTTPS. 
+ tm_out "N/A" + else + tm_out "--" + fi + return 0 +} + +certificate_info() { + local proto + local -i certificate_number=$1 + local -i number_of_certificates=$2 + local cert_txt="$3" + local cipher=$4 + local cert_keysize=$5 + local cert_type="$6" + local ocsp_response_binary="$7" + local ocsp_response=$8 + local ocsp_response_status=$9 + local sni_used="${10}" + local ct="${11}" + local certificate_list_ordering_problem="${12}" + local cert_sig_algo cert_sig_hash_algo cert_key_algo cert_keyusage cert_ext_keyusage short_keyAlgo + local outok=true + local expire days2expire secs2warn ocsp_uri crl + local startdate enddate issuer_CN issuer_C issuer_O issuer sans san all_san="" cn + local issuer_DC issuerfinding cn_nosni="" + local cert_fingerprint_sha1 cert_fingerprint_sha2 cert_serial + local policy_oid + local spaces="" + local -i trust_sni=0 trust_nosni=0 diffseconds=0 + local has_dns_sans has_dns_sans_nosni + local trust_sni_finding + local -i certificates_provided + local cnfinding trustfinding trustfinding_nosni + local cnok="OK" + local expfinding expok="OK" + local -i ret=0 + local json_postfix="" # string to place at the end of JSON IDs when there is more than one certificate + local jsonID="" # string to place at beginning of JSON IDs + local indent="" + local days2warn2=$DAYS2WARN2 + local days2warn1=$DAYS2WARN1 + local provides_stapling=false + local caa_node="" all_caa="" caa_property_name="" caa_property_value="" + local response="" + local yearstart yearend clockstart clockend y m d + local gt_825=false gt_825warn=false + + if [[ $number_of_certificates -gt 1 ]]; then + [[ $certificate_number -eq 1 ]] && outln + indent=" " + out "$indent" + pr_headline "Server Certificate #$certificate_number" + [[ -z "$sni_used" ]] && pr_underline " (in response to request w/o SNI)" + outln + json_postfix=" " + spaces=" " + else + spaces=" " + fi + + GOOD_CA_BUNDLE="" + cert_sig_algo="$(awk -F':' '/Signature Algorithm/ { print $2; if (++Match >= 1) exit; }' <<< "$cert_txt")" + cert_sig_algo="${cert_sig_algo// /}" + cert_key_algo="$(awk -F':' '/Public Key Algorithm:/ { print $2; if (++Match >= 1) exit; }' <<< "$cert_txt")" + cert_key_algo="${cert_key_algo// /}" + + out "$indent" ; pr_bold " Signature Algorithm " + jsonID="cert_signatureAlgorithm" + case $cert_sig_algo in + sha1WithRSAEncryption) + pr_svrty_medium "SHA1 with RSA" + if [[ "$SERVICE" == HTTP ]] || "$ASSUME_HTTP"; then + out " -- besides: users will receive a "; pr_svrty_high "strong browser WARNING" + fi + outln + fileout "${jsonID}${json_postfix}" "MEDIUM" "SHA1 with RSA" + ;; + sha224WithRSAEncryption) + outln "SHA224 with RSA" + fileout "${jsonID}${json_postfix}" "INFO" "SHA224 with RSA" + ;; + sha256WithRSAEncryption) + prln_svrty_good "SHA256 with RSA" + fileout "${jsonID}${json_postfix}" "OK" "SHA256 with RSA" + ;; + sha384WithRSAEncryption) + prln_svrty_good "SHA384 with RSA" + fileout "${jsonID}${json_postfix}" "OK" "SHA384 with RSA" + ;; + sha512WithRSAEncryption) + prln_svrty_good "SHA512 with RSA" + fileout "${jsonID}${json_postfix}" "OK" "SHA512 with RSA" + ;; + ecdsa-with-SHA1) + prln_svrty_medium "ECDSA with SHA1" + fileout "${jsonID}${json_postfix}" "MEDIUM" "ECDSA with SHA1" + ;; + ecdsa-with-SHA224) + outln "ECDSA with SHA224" + fileout "${jsonID}${json_postfix}" "INFO" "ECDSA with SHA224" + ;; + ecdsa-with-SHA256) + prln_svrty_good "ECDSA with SHA256" + fileout "${jsonID}${json_postfix}" "OK" "ECDSA with SHA256" + ;; + ecdsa-with-SHA384) + prln_svrty_good "ECDSA with SHA384" + fileout 
"${jsonID}${json_postfix}" "OK" "ECDSA with SHA384" + ;; + ecdsa-with-SHA512) + prln_svrty_good "ECDSA with SHA512" + fileout "${jsonID}${json_postfix}" "OK" "ECDSA with SHA512" + ;; + dsaWithSHA1) + prln_svrty_medium "DSA with SHA1" + fileout "${jsonID}${json_postfix}" "MEDIUM" "DSA with SHA1" + ;; + dsa_with_SHA224) + outln "DSA with SHA224" + fileout "${jsonID}${json_postfix}" "INFO" "DSA with SHA224" + ;; + dsa_with_SHA256) + prln_svrty_good "DSA with SHA256" + fileout "${jsonID}${json_postfix}" "OK" "DSA with SHA256" + ;; + rsassaPss) + cert_sig_hash_algo="$(grep -A 1 "Signature Algorithm" <<< "$cert_txt" | head -2 | tail -1 | sed 's/^.*Hash Algorithm: //')" + case $cert_sig_hash_algo in + sha1) + prln_svrty_medium "RSASSA-PSS with SHA1" + fileout "${jsonID}${json_postfix}" "MEDIUM" "RSASSA-PSS with SHA1" + ;; + sha224) + outln "RSASSA-PSS with SHA224" + fileout "${jsonID}${json_postfix}" "INFO" "RSASSA-PSS with SHA224" + ;; + sha256) + prln_svrty_good "RSASSA-PSS with SHA256" + fileout "${jsonID}${json_postfix}" "OK" "RSASSA-PSS with SHA256" + ;; + sha384) + prln_svrty_good "RSASSA-PSS with SHA384" + fileout "${jsonID}${json_postfix}" "OK" "RSASSA-PSS with SHA384" + ;; + sha512) + prln_svrty_good "RSASSA-PSS with SHA512" + fileout "${jsonID}${json_postfix}" "OK" "RSASSA-PSS with SHA512" + ;; + *) + out "RSASSA-PSS with $cert_sig_hash_algo" + prln_warning " (Unknown hash algorithm)" + fileout "${jsonID}${json_postfix}" "DEBUG" "RSASSA-PSS with $cert_sig_hash_algo" + esac + ;; + md2*) + prln_svrty_critical "MD2" + fileout "${jsonID}${json_postfix}" "CRITICAL" "MD2" + ;; + md4*) + prln_svrty_critical "MD4" + fileout "${jsonID}${json_postfix}" "CRITICAL" "MD4" + ;; + md5*) + prln_svrty_critical "MD5" + fileout "${jsonID}${json_postfix}" "CRITICAL" "MD5" + ;; + *) + out "$cert_sig_algo (" + pr_warning "FIXME: can't tell whether this is good or not" + outln ")" + fileout "${jsonID}${json_postfix}" "DEBUG" "$cert_sig_algo" + ((ret++)) + ;; + esac + # old, but still interesting: https://blog.hboeck.de/archives/754-Playing-with-the-EFF-SSL-Observatory.html + + out "$indent"; pr_bold " Server key size " + jsonID="cert_keySize" + if [[ -z "$cert_keysize" ]]; then + outln "(couldn't determine)" + fileout "${jsonID}${json_postfix}" "cannot be determined" + ((ret++)) + else + case $cert_key_algo in + *RSA*|*rsa*) short_keyAlgo="RSA";; + *ecdsa*|*ecPublicKey) short_keyAlgo="EC";; + *DSA*|*dsa*) short_keyAlgo="DSA";; + *GOST*|*gost*) short_keyAlgo="GOST";; + *dh*|*DH*) short_keyAlgo="DH" ;; + *) pr_fixme "don't know $cert_key_algo " + let ret++ ;; + esac + out "$short_keyAlgo " + # https://tools.ietf.org/html/rfc4492, https://www.keylength.com/en/compare/ + # https://infoscience.epfl.ch/record/164526/files/NPDF-22.pdf + # see https://csrc.nist.gov/publications/detail/sp/800-57-part-1/rev-4/final + # Table 2 @ chapter 5.6.1 (~ p66) + if [[ $cert_key_algo =~ ecdsa ]] || [[ $cert_key_algo =~ ecPublicKey ]]; then + if [[ "$cert_keysize" -le 110 ]]; then # a guess + pr_svrty_critical "$cert_keysize" + fileout "${jsonID}${json_postfix}" "CRITICAL" "$short_keyAlgo $cert_keysize bits" + elif [[ "$cert_keysize" -le 123 ]]; then # a guess + pr_svrty_high "$cert_keysize" + fileout "${jsonID}${json_postfix}" "HIGH" "$short_keyAlgo $cert_keysize bits" + elif [[ "$cert_keysize" -le 163 ]]; then + pr_svrty_medium "$cert_keysize" + fileout "${jsonID}${json_postfix}" "MEDIUM" "$short_keyAlgo $cert_keysize bits" + elif [[ "$cert_keysize" -le 224 ]]; then + out "$cert_keysize" + fileout "${jsonID}${json_postfix}" 
"INFO" "$short_keyAlgo $cert_keysize bits" + elif [[ "$cert_keysize" -le 533 ]]; then + pr_svrty_good "$cert_keysize" + fileout "${jsonID}${json_postfix}" "OK" "$short_keyAlgo $cert_keysize bits" + else + out "keysize: $cert_keysize (not expected, FIXME)" + fileout "${jsonID}${json_postfix}" "DEBUG" " $cert_keysize bits (not expected)" + ((ret++)) + fi + outln " bits" + elif [[ $cert_key_algo =~ RSA ]] || [[ $cert_key_algo =~ rsa ]] || [[ $cert_key_algo =~ dsa ]] || \ + [[ $cert_key_algo =~ dhKeyAgreement ]] || [[ $cert_key_algo == X9.42\ DH ]]; then + if [[ "$cert_keysize" -le 512 ]]; then + pr_svrty_critical "$cert_keysize" + outln " bits" + fileout "${jsonID}${json_postfix}" "CRITICAL" "$short_keyAlgo $cert_keysize bits" + elif [[ "$cert_keysize" -le 768 ]]; then + pr_svrty_high "$cert_keysize" + outln " bits" + fileout "${jsonID}${json_postfix}" "HIGH" "$short_keyAlgo $cert_keysize bits" + elif [[ "$cert_keysize" -le 1024 ]]; then + pr_svrty_medium "$cert_keysize" + outln " bits" + fileout "${jsonID}${json_postfix}" "MEDIUM" "$short_keyAlgo $cert_keysize bits" + elif [[ "$cert_keysize" -le 2048 ]]; then + outln "$cert_keysize bits" + fileout "${jsonID}${json_postfix}" "INFO" "$short_keyAlgo $cert_keysize bits" + elif [[ "$cert_keysize" -le 4096 ]]; then + pr_svrty_good "$cert_keysize" + fileout "${jsonID}${json_postfix}" "OK" "$short_keyAlgo $cert_keysize bits" + outln " bits" + else + pr_warning "weird key size: $cert_keysize bits"; outln " (could cause compatibility problems)" + fileout "${jsonID}${json_postfix}" "WARN" "$cert_keysize bits (Odd)" + ((ret++)) + fi + else + out "$cert_key_algo + $cert_keysize bits (" + pr_warning "FIXME: can't tell whether this is good or not" + outln ")" + fileout "${jsonID}${json_postfix}" "WARN" "Server keys $cert_keysize bits, unknown public key algorithm $cert_key_algo" + ((ret++)) + fi + fi + + out "$indent"; pr_bold " Server key usage "; + outok=true + jsonID="cert_keyUsage" + cert_keyusage="$(strip_leading_space "$(awk '/X509v3 Key Usage:/ { getline; print $0 }' <<< "$cert_txt")")" + if [[ -n "$cert_keyusage" ]]; then + outln "$cert_keyusage" + if ( [[ " $cert_type " =~ " RSASig " ]] || [[ " $cert_type " =~ " DSA " ]] || [[ " $cert_type " =~ " ECDSA " ]] ) && \ + [[ ! "$cert_keyusage" =~ "Digital Signature" ]]; then + prln_svrty_high "$indent Certificate incorrectly used for digital signatures" + fileout "${jsonID}${json_postfix}" "HIGH" "Certificate incorrectly used for digital signatures: \"$cert_keyusage\"" + outok=false + fi + if [[ " $cert_type " =~ " RSAKMK " ]] && [[ ! "$cert_keyusage" =~ "Key Encipherment" ]]; then + prln_svrty_high "$indent Certificate incorrectly used for key encipherment" + fileout "${jsonID}${json_postfix}" "HIGH" "Certificate incorrectly used for key encipherment: \"$cert_keyusage\"" + outok=false + fi + if ( [[ " $cert_type " =~ " DH " ]] || [[ " $cert_type " =~ " ECDH " ]] ) && \ + [[ ! 
"$cert_keyusage" =~ "Key Agreement" ]]; then + prln_svrty_high "$indent Certificate incorrectly used for key agreement" + fileout "${jsonID}${json_postfix}" "HIGH" "Certificate incorrectly used for key agreement: \"$cert_keyusage\"" + outok=false + fi + else + outln "--" + fileout "${jsonID}${json_postfix}" "INFO" "No server key usage information" + outok=false + fi + if "$outok"; then + fileout "${jsonID}${json_postfix}" "INFO" "$cert_keyusage" + fi + + out "$indent"; pr_bold " Server extended key usage "; + jsonID="cert_extKeyUsage" + outok=true + cert_ext_keyusage="$(strip_leading_space "$(awk '/X509v3 Extended Key Usage:/ { getline; print $0 }' <<< "$cert_txt")")" + if [[ -n "$cert_ext_keyusage" ]]; then + outln "$cert_ext_keyusage" + if [[ ! "$cert_ext_keyusage" =~ "TLS Web Server Authentication" ]] && [[ ! "$cert_ext_keyusage" =~ "Any Extended Key Usage" ]]; then + prln_svrty_high "$indent Certificate incorrectly used for TLS Web Server Authentication" + fileout "${jsonID}${json_postfix}" "HIGH" "Certificate incorrectly used for TLS Web Server Authentication: \"$cert_ext_keyusage\"" + outok=false + fi + else + outln "--" + fileout "${jsonID}${json_postfix}" "INFO" "No server extended key usage information" + outok=false + fi + if "$outok"; then + fileout "${jsonID}${json_postfix}" "INFO" "cert_ext_keyusage" + fi + + out "$indent"; pr_bold " Serial / Fingerprints " + cert_serial="$($OPENSSL x509 -noout -in $HOSTCERT -serial 2>>$ERRFILE | sed 's/serial=//')" + fileout "cert_serialNumber${json_postfix}" "INFO" "$cert_serial" + + cert_fingerprint_sha1="$($OPENSSL x509 -noout -in $HOSTCERT -fingerprint -sha1 2>>$ERRFILE | sed 's/Fingerprint=//' | sed 's/://g')" + fileout "cert_fingerprintSHA1${json_postfix}" "INFO" "${cert_fingerprint_sha1//SHA1 /}" + outln "$cert_serial / $cert_fingerprint_sha1" + + cert_fingerprint_sha2="$($OPENSSL x509 -noout -in $HOSTCERT -fingerprint -sha256 2>>$ERRFILE | sed 's/Fingerprint=//' | sed 's/://g' )" + fileout "cert_fingerprintSHA256${json_postfix}" "INFO" "${cert_fingerprint_sha2//SHA256 /}" + outln "$spaces$cert_fingerprint_sha2" + + # " " needs to be converted back to lf in JSON/CSV output + fileout "cert${json_postfix}" "INFO" "$(< $HOSTCERT)" + + [[ -z $CERT_FINGERPRINT_SHA2 ]] && \ + CERT_FINGERPRINT_SHA2="$cert_fingerprint_sha2" || + CERT_FINGERPRINT_SHA2="$cert_fingerprint_sha2 $CERT_FINGERPRINT_SHA2" + [[ -z $RSA_CERT_FINGERPRINT_SHA2 ]] && \ + ( [[ $cert_key_algo = *RSA* ]] || [[ $cert_key_algo = *rsa* ]] ) && + RSA_CERT_FINGERPRINT_SHA2="$cert_fingerprint_sha2" + + out "$indent"; pr_bold " Common Name (CN) " + cnfinding="Common Name (CN) : " + cn="$(get_cn_from_cert $HOSTCERT)" + if [[ -n "$cn" ]]; then + pr_italic "$cn" + cnfinding="$cn" + else + cn="no CN field in subject" + out "($cn)" + cnfinding="$cn" + cnok="INFO" + fi + fileout "cert_commonName${json_postfix}" "$cnok" "$cnfinding" + cnfinding="" + + if [[ -n "$sni_used" ]]; then + if grep -q "\-\-\-\-\-BEGIN" "$HOSTCERT.nosni"; then + cn_nosni="$(get_cn_from_cert "$HOSTCERT.nosni")" + [[ -z "$cn_nosni" ]] && cn_nosni="no CN field in subject" + fi + debugme tm_out "\"$NODE\" | \"$cn\" | \"$cn_nosni\"" + else + debugme tm_out "\"$NODE\" | \"$cn\"" + fi + + if [[ -z "$sni_used" ]] || [[ "$(toupper "$cn_nosni")" == "$(toupper "$cn")" ]]; then + outln + cnfinding="$cn" + elif [[ -z "$cn_nosni" ]]; then + out " (request w/o SNI didn't succeed"; + cnfinding+="request w/o SNI didn't succeed" + if [[ $cert_sig_algo =~ ecdsa ]]; then + out ", usual for EC certificates" + cnfinding+=", usual for EC 
certificates" + fi + outln ")" + cnfinding+="" + elif [[ "$cn_nosni" == *"no CN field"* ]]; then + outln ", (request w/o SNI: $cn_nosni)" + cnfinding="$cn_nosni" + else + out " (CN in response to request w/o SNI: "; pr_italic "$cn_nosni"; outln ")" + cnfinding="$cn_nosni" + fi + fileout "cert_commonName_wo_SNI${json_postfix}" "INFO" "$cnfinding" + + sans=$(grep -A2 "Subject Alternative Name" <<< "$cert_txt" | \ + grep -E "DNS:|IP Address:|email:|URI:|DirName:|Registered ID:" | tr ',' '\n' | \ + sed -e 's/ *DNS://g' -e 's/ *IP Address://g' -e 's/ *email://g' -e 's/ *URI://g' -e 's/ *DirName://g' \ + -e 's/ *Registered ID://g' \ + -e 's/ *othername://g' -e 's/ *X400Name://g' -e 's/ *EdiPartyName://g') + # ^^^ CACert + + out "$indent"; pr_bold " subjectAltName (SAN) " + jsonID="cert_subjectAltName" + if [[ -n "$sans" ]]; then + while read san; do + [[ -n "$san" ]] && all_san+="$san " + done <<< "$sans" + prln_italic "$(out_row_aligned_max_width "$all_san" "$indent " $TERM_WIDTH)" + fileout "${jsonID}${json_postfix}" "INFO" "$all_san" + else + if [[ $SERVICE == "HTTP" ]] || "$ASSUME_HTTP"; then + pr_svrty_high "missing (NOT ok)"; outln " -- Browsers are complaining" + fileout "${jsonID}${json_postfix}" "HIGH" "No SAN, browsers are complaining" + else + pr_svrty_medium "missing"; outln " -- no SAN is deprecated" + fileout "${jsonID}${json_postfix}" "MEDIUM" "Providing no SAN is deprecated" + fi + fi + + out "$indent"; pr_bold " Issuer " + jsonID="cert_caIssuers" + #FIXME: oid would be better maybe (see above) + issuer="$($OPENSSL x509 -in $HOSTCERT -noout -issuer -nameopt multiline,-align,sname,-esc_msb,utf8,-space_eq 2>>$ERRFILE)" + issuer_CN="$(awk -F'=' '/CN=/ { print $2 }' <<< "$issuer")" + issuer_O="$(awk -F'=' '/O=/ { print $2 }' <<< "$issuer")" + issuer_C="$(awk -F'=' '/ C=/ { print $2 }' <<< "$issuer")" + issuer_DC="$(awk -F'=' '/DC=/ { print $2 }' <<< "$issuer")" + + if [[ "$issuer_O" == "issuer=" ]] || [[ "$issuer_O" == "issuer= " ]] || [[ "$issuer_CN" == "$cn" ]]; then + prln_svrty_critical "self-signed (NOT ok)" + fileout "${jsonID}${json_postfix}" "CRITICAL" "selfsigned" + else + issuerfinding="$issuer_CN" + pr_italic "$issuer_CN" + if [[ -z "$issuer_O" ]] && [[ -n "$issuer_DC" ]]; then + for san in $issuer_DC; do + if [[ -z "$issuer_O" ]]; then + issuer_O="${san}" + else + issuer_O="${san}.${issuer_O}" + fi + done + fi + if [[ -n "$issuer_O" ]]; then + issuerfinding+=" (" + out " (" + issuerfinding+="$issuer_O" + pr_italic "$issuer_O" + if [[ -n "$issuer_C" ]]; then + issuerfinding+=" from " + out " from " + issuerfinding+="$issuer_C" + pr_italic "$issuer_C" + fi + issuerfinding+=")" + out ")" + fi + outln + fileout "${jsonID}${json_postfix}" "INFO" "$issuerfinding" + fi + + out "$indent"; pr_bold " Trust (hostname) " + compare_server_name_to_cert "$HOSTCERT" + trust_sni=$? + + # Find out if the subjectAltName extension is present and contains + # a DNS name, since Section 6.3 of RFC 6125 says: + # Security Warning: A client MUST NOT seek a match for a reference + # identifier of CN-ID if the presented identifiers include a DNS-ID, + # SRV-ID, URI-ID, or any application-specific identifier types + # supported by the client. 
+ has_dns_sans=$HAS_DNS_SANS + + case $trust_sni in + 0) trustfinding="certificate does not match supplied URI" ;; + 1) trustfinding="Ok via SAN" ;; + 2) trustfinding="Ok via SAN wildcard" ;; + 4) if "$has_dns_sans"; then + trustfinding="via CN, but not SAN" + else + trustfinding="via CN only" + fi + ;; + 5) trustfinding="Ok via SAN and CN" ;; + 6) trustfinding="Ok via SAN wildcard and CN" + ;; + 8) if "$has_dns_sans"; then + trustfinding="via CN wildcard, but not SAN" + else + trustfinding="via CN (wildcard) only" + fi + ;; + 9) trustfinding="Ok via CN wildcard and SAN" + ;; + 10) trustfinding="Ok via SAN wildcard and CN wildcard" + ;; + esac + + if [[ $trust_sni -eq 0 ]]; then + pr_svrty_high "$trustfinding" + trust_sni_finding="HIGH" + elif ( [[ $trust_sni -eq 4 ]] || [[ $trust_sni -eq 8 ]] ); then + if [[ $SERVICE == "HTTP" ]] || "$ASSUME_HTTP"; then + # https://bugs.chromium.org/p/chromium/issues/detail?id=308330 + # https://bugzilla.mozilla.org/show_bug.cgi?id=1245280 + # https://www.chromestatus.com/feature/4981025180483584 + pr_svrty_high "$trustfinding"; out " -- Browsers are complaining" + trust_sni_finding="HIGH" + else + pr_svrty_medium "$trustfinding" + trust_sni_finding="MEDIUM" + # we punish CN matching for non-HTTP as it is deprecated https://tools.ietf.org/html/rfc2818#section-3.1 + ! "$has_dns_sans" && out " -- CN only match is deprecated" + fi + else + pr_svrty_good "$trustfinding" + trust_sni_finding="OK" + fi + + if [[ -n "$cn_nosni" ]]; then + compare_server_name_to_cert "$HOSTCERT.nosni" + trust_nosni=$? + has_dns_sans_nosni=$HAS_DNS_SANS + fi + + # See issue #733. + if [[ -z "$sni_used" ]]; then + trustfinding_nosni="" + elif ( [[ $trust_sni -eq $trust_nosni ]] && [[ "$has_dns_sans" == "$has_dns_sans_nosni" ]] ) || \ + ( [[ $trust_sni -eq 0 ]] && [[ $trust_nosni -eq 0 ]] ); then + trustfinding_nosni=" (same w/o SNI)" + elif [[ $trust_nosni -eq 0 ]]; then + if [[ $trust_sni -eq 4 ]] || [[ $trust_sni -eq 8 ]]; then + trustfinding_nosni=" (w/o SNI: certificate does not match supplied URI)" + else + trustfinding_nosni=" (SNI mandatory)" + fi + elif [[ $trust_nosni -eq 4 ]] || [[ $trust_nosni -eq 8 ]] || [[ $trust_sni -eq 4 ]] || [[ $trust_sni -eq 8 ]]; then + case $trust_nosni in + 1) trustfinding_nosni="(w/o SNI: Ok via SAN)" ;; + 2) trustfinding_nosni="(w/o SNI: Ok via SAN wildcard)" ;; + 4) if "$has_dns_sans_nosni"; then + trustfinding_nosni="(w/o SNI: via CN, but not SAN)" + else + trustfinding_nosni="(w/o SNI: via CN only)" + fi + ;; + 5) trustfinding_nosni="(w/o SNI: Ok via SAN and CN)" ;; + 6) trustfinding_nosni="(w/o SNI: Ok via SAN wildcard and CN)" ;; + 8) if "$has_dns_sans_nosni"; then + trustfinding_nosni="(w/o SNI: via CN wildcard, but not SAN)" + else + trustfinding_nosni="(w/o SNI: via CN (wildcard) only)" + fi + ;; + 9) trustfinding_nosni="(w/o SNI: Ok via CN wildcard and SAN)" ;; + 10) trustfinding_nosni="(w/o SNI: Ok via SAN wildcard and CN wildcard)" ;; + esac + elif [[ $trust_sni -ne 0 ]]; then + trustfinding_nosni=" (works w/o SNI)" + else + trustfinding_nosni=" (however, works w/o SNI)" + fi + if [[ -n "$sni_used" ]] || [[ $trust_nosni -eq 0 ]] || ( [[ $trust_nosni -ne 4 ]] && [[ $trust_nosni -ne 8 ]] ); then + outln "$trustfinding_nosni" + elif [[ $SERVICE == "HTTP" ]] || "$ASSUME_HTTP"; then + prln_svrty_high "$trustfinding_nosni" + else + prln_svrty_medium "$trustfinding_nosni" + fi + + fileout "cert_trust${json_postfix}" "$trust_sni_finding" "${trustfinding}${trustfinding_nosni}" + + out "$indent"; pr_bold " Chain of trust"; out " " + 
jsonID="cert_chain_of_trust" + if [[ "$issuer_O" =~ StartCom ]] || [[ "$issuer_O" =~ WoSign ]] || [[ "$issuer_CN" =~ StartCom ]] || [[ "$issuer_CN" =~ WoSign ]]; then + # Shortcut for this special case here. + pr_italic "WoSign/StartCom"; out " are " ; prln_svrty_critical "not trusted anymore (NOT ok)" + fileout "${jsonID}${json_postfix}" "CRITICAL" "Issuer not trusted anymore (WoSign/StartCom)" + else + # Also handles fileout, keep error if happened + determine_trust "$jsonID" "$json_postfix" || ((ret++)) + fi + + # https://events.ccc.de/congress/2010/Fahrplan/attachments/1777_is-the-SSLiverse-a-safe-place.pdf, see page 40pp + out "$indent"; pr_bold " EV cert"; out " (experimental) " + jsonID="cert_certificatePolicies_EV" + # only the first one, seldom we have two + policy_oid=$(awk '/ .Policy: / { print $2 }' <<< "$cert_txt" | awk 'NR < 2') + if grep -Eq 'Extended Validation|Extended Validated|EV SSL|EV CA' <<< "$issuer" || \ + [[ 2.16.840.1.114028.10.1.2 == "$policy_oid" ]] || \ + [[ 2.16.840.1.114412.1.3.0.2 == "$policy_oid" ]] || \ + [[ 2.16.840.1.114412.2.1 == "$policy_oid" ]] || \ + [[ 2.16.578.1.26.1.3.3 == "$policy_oid" ]] || \ + [[ 1.3.6.1.4.1.17326.10.14.2.1.2 == "$policy_oid" ]] || \ + [[ 1.3.6.1.4.1.17326.10.8.12.1.2 == "$policy_oid" ]] || \ + [[ 1.3.6.1.4.1.13177.10.1.3.10 == "$policy_oid" ]] ; then + out "yes " + fileout "${jsonID}${json_postfix}" "OK" "yes" + else + out "no " + fileout "${jsonID}${json_postfix}" "INFO" "no" + fi + debugme echo "($(newline_to_spaces "$policy_oid"))" + outln +#TODO: check browser OIDs: +# https://mxr.mozilla.org/mozilla-central/source/security/certverifier/ExtendedValidation.cpp +# https://chromium.googlesource.com/chromium/chromium/+/master/net/base/ev_root_ca_metadata.cc +# https://certs.opera.com/03/ev-oids.xml +# see #967 + + out "$indent"; pr_bold " ETS/\"eTLS\"" + out ", visibility info " + jsonID="cert_eTLS" + etsi_etls_visibility_info "$jsonID" "$spaces" "$HOSTCERT" "$cert_txt" + # *Currently* this is even listed as a vulnerability (CWE-310, CVE-2019-919), see + # https://nvd.nist.gov/vuln/detail/CVE-2019-9191, https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-9191 + # For now we leave this here. 
We may want to change that later or add infos to other sections (PFS & vulnerability) + + out "$indent"; pr_bold " Certificate Validity (UTC) " + # FreeBSD + OSX can't swallow the leading blank: + startdate="${cert_txt#*Validity*Not Before: }" + startdate="${startdate%%GMT*}GMT" + enddate="${cert_txt#*Validity*Not Before: *Not After : }" + enddate="${enddate%%GMT*}GMT" + debugme echo "$enddate - $startdate" + # Now we have a normalized enddate and startdate like "Feb 27 10:03:20 2017 GMT" -- also for OpenBSD + if "$HAS_OPENBSDDATE"; then + # Best we want to do under old versions of OpenBSD, first just remove the GMT and keep start/endate for later output + startdate="$(parse_date "$startdate" "+%s")" + enddate="$(parse_date "$enddate" "+%s")" + # Now we extract a date block and a time block which we need for later output + startdate="$(parse_date "$startdate" +"%F %H:%M" "%b %d %T %Y %Z")" + enddate="$(parse_date "$enddate" +"%F %H:%M" "%b %d %T %Y %Z")" + read yearstart clockstart <<< "$startdate" + read yearend clockend <<< "$enddate" + debugme echo "$yearstart, $clockstart" + debugme echo "$yearend, $clockend" + y=$(( ${yearend:0:4} - ${yearstart:0:4} )) + m=$(( ${yearend:5:1} - ${yearstart:5:1} + ${yearend:6:1} - ${yearstart:6:1} )) + d=$(( ${yearend:8:2} - ${yearstart:8:2} )) + # We take the year, month, days here as old OpenBSD's date is too difficult for real conversion + # see comment in parse_date(). In diffseconds then we have the estimated absolute validity period + diffseconds=$(( d + ((m*30)) + ((y*365)) )) + diffseconds=$((diffseconds * 3600 * 24)) + # Now we estimate the days left plus length of month/year: + yearnow="$(date -juz GMT "+%Y-%m-%d %H:%M")" + y=$(( ${yearend:0:4} - ${yearnow:0:4} )) + m=$(( ${yearend:5:1} - ${yearnow:5:1} + ${yearend:6:1} - ${yearnow:6:1} )) + d=$(( ${yearend:8:2} - ${yearnow:8:2} )) + days2expire=$(( d + ((m*30)) + ((y*365)) )) + else + startdate="$(parse_date "$startdate" +"%F %H:%M" "%b %d %T %Y %Z")" + enddate="$(parse_date "$enddate" +"%F %H:%M" "%b %d %T %Y %Z")" + days2expire=$(( $(parse_date "$enddate" "+%s" $'%F %H:%M') - $(LC_ALL=C date "+%s") )) # first in seconds + days2expire=$((days2expire / 3600 / 24 )) + diffseconds=$(( $(parse_date "$enddate" "+%s" $'%F %H:%M') - $(parse_date "$startdate" "+%s" $'%F %H:%M') )) + fi + # We adjust the thresholds by %50 for LE certificates, relaxing warnings for those certificates. + # . instead of \' because it does not break syntax highlighting in vim + if [[ "$issuer_CN" =~ ^Let.s\ Encrypt\ Authority ]] ; then + days2warn2=$((days2warn2 / 2)) + days2warn1=$((days2warn1 / 2)) + fi + + debugme echo -n "diffseconds: $diffseconds" + expire=$($OPENSSL x509 -in $HOSTCERT -checkend 1 2>>$ERRFILE) + if ! 
grep -qw not <<< "$expire" ; then + pr_svrty_critical "expired" + expfinding="expired" + expok="CRITICAL" + else + secs2warn=$((24 * 60 * 60 * days2warn2)) # low threshold first + expire=$($OPENSSL x509 -in $HOSTCERT -checkend $secs2warn 2>>$ERRFILE) + if grep -qw not <<< "$expire"; then + secs2warn=$((24 * 60 * 60 * days2warn1)) # high threshold + expire=$($OPENSSL x509 -in $HOSTCERT -checkend $secs2warn 2>>$ERRFILE) + if grep -qw not <<< "$expire"; then + pr_svrty_good "$days2expire >= $days2warn1 days" + expfinding+="$days2expire >= $days2warn1 days" + else + pr_svrty_medium "expires < $days2warn1 days ($days2expire)" + expfinding+="expires < $days2warn1 days ($days2expire)" + expok="MEDIUM" + fi + else + pr_svrty_high "expires < $days2warn2 days ($days2expire)" + expfinding+="expires < $days2warn2 days ($days2expire)" + expok="HIGH" + fi + fi + outln " ($startdate --> $enddate)" + fileout "cert_expirationStatus${json_postfix}" "$expok" "$expfinding" + fileout "cert_notBefore${json_postfix}" "INFO" "$startdate" # we assume that the certificate has no start time in the future + fileout "cert_notAfter${json_postfix}" "$expok" "$enddate" # They are in UTC + + # Internal certificates or those from appliances often have overly long validity periods. + # We check for ~10 years and >~ 5 years + if [[ $diffseconds -ge $((3600 * 24 * 365 * 10)) ]]; then + out "$spaces" + prln_svrty_high ">= 10 years is way too long" + fileout "cert_validityPeriod${json_postfix}" "HIGH" "$((diffseconds / (3600 * 24) )) days" + elif [[ $diffseconds -ge $((3600 * 24 * 365 * 5)) ]]; then + out "$spaces" + prln_svrty_medium ">= 5 years is too long" + fileout "cert_validityPeriod${json_postfix}" "MEDIUM" "$((diffseconds / (3600 * 24) )) days" + elif [[ $diffseconds -ge $((3600 * 24 * 825 + 1)) ]]; then + # Also "official" certificates issued from March 1st, 2018 (1517353200 in epoch seconds) aren't supposed + # to be valid longer than 825 days + gt_825=true + if "$HAS_OPENBSDDATE"; then + if [[ 20180301 -le ${yearstart//-/} ]]; then + gt_825warn=true + fi + elif [[ $(parse_date "$startdate" "+%s" $'%F %H:%M') -ge 1517353200 ]]; then + gt_825warn=true + fi + # Now, the verdict, depending on the issuing date + out "$spaces" + if "$gt_825warn" && "$gt_825"; then + prln_svrty_medium "> 825 days issued after 2018/03/01 is too long" + fileout "cert_validityPeriod${json_postfix}" "MEDIUM" "$((diffseconds / (3600 * 24) )) > 825 days" + elif "$gt_825"; then + outln ">= 825 days certificate lifetime, but issued before 2018/03/01" + fileout "cert_validityPeriod${json_postfix}" "INFO" "$((diffseconds / (3600 * 24) )) <= 825 days" + fi + else + # All is fine with the validity period + # We ignore for now certificates < 2018/03/01.
On the screen we only show debug info + [[ "$DEBUG" -ge 1 ]] && outln "${spaces}DEBUG: all is fine with total certificate life time" + fileout "cert_validityPeriod${json_postfix}" "INFO" "No finding" + fi + + certificates_provided=1+$(grep -c "\-\-\-\-\-BEGIN CERTIFICATE\-\-\-\-\-" $TEMPDIR/intermediatecerts.pem) + out "$indent"; pr_bold " # of certificates provided"; out " $certificates_provided" + fileout "certs_countServer${json_postfix}" "INFO" "${certificates_provided}" + if "$certificate_list_ordering_problem"; then + prln_svrty_low " (certificate list ordering problem)" + fileout "certs_list_ordering_problem${json_postfix}" "LOW" "yes" + else + fileout "certs_list_ordering_problem${json_postfix}" "INFO" "no" + outln + fi + + if "$PHONE_OUT"; then + out "$indent"; pr_bold " In pwnedkeys.com DB " + check_pwnedkeys "$HOSTCERT" "$cert_key_algo" "$cert_keysize" + case "$?" in + 0) outln "not checked"; fileout "pwnedkeys${json_postfix}" "INFO" "not checked" ;; + 1) outln "not in database"; fileout "pwnedkeys${json_postfix}" "INFO" "not in database" ;; + 2) pr_svrty_critical "NOT ok --"; outln " key appears in database"; fileout "pwnedkeys${json_postfix}" "CRITICAL" "private key is known" ;; + 7) prln_warning "error querying https://v1.pwnedkeys.com"; fileout "pwnedkeys${json_postfix}" "WARN" "connection error" ;; + esac + fi + + out "$indent"; pr_bold " Certificate Revocation List " + jsonID="cert_crlDistributionPoints" + # ~ get next 50 lines after pattern , strip until Signature Algorithm and retrieve URIs + crl="$(awk '/X509v3 CRL Distribution/{i=50} i&&i--' <<< "$cert_txt" | awk '/^$/,/^ [a-zA-Z0-9]+|^ Signature Algorithm:/' | awk -F'URI:' '/URI/ { print $2 }')" + if [[ -z "$crl" ]] ; then + fileout "${jsonID}${json_postfix}" "INFO" "--" + outln "--" + else + if [[ $(count_lines "$crl") -eq 1 ]]; then + out "$crl" + if [[ "$expfinding" != "expired" ]]; then + check_revocation_crl "$crl" "cert_crlRevoked${json_postfix}" + ret=$((ret +$?)) + fi + outln + else # more than one CRL + first_crl=true + while read -r line; do + if "$first_crl"; then + first_crl=false + else + out "$spaces" + fi + out "$line" + if [[ "$expfinding" != expired ]]; then + check_revocation_crl "$line" "cert_crlRevoked${json_postfix}" + ret=$((ret +$?)) + fi + outln + done <<< "$crl" + fi + fileout "${jsonID}${json_postfix}" "INFO" "$crl" + fi + + out "$indent"; pr_bold " OCSP URI " + jsonID="cert_ocspURL" + ocsp_uri=$($OPENSSL x509 -in $HOSTCERT -noout -ocsp_uri 2>>$ERRFILE) + if [[ -z "$ocsp_uri" ]]; then + outln "--" + fileout "${jsonID}${json_postfix}" "INFO" "--" + else + if [[ $(count_lines "$ocsp_uri") -eq 1 ]]; then + out "$ocsp_uri" + if [[ "$expfinding" != "expired" ]]; then + check_revocation_ocsp "$ocsp_uri" "" "cert_ocspRevoked${json_postfix}" + fi + ret=$((ret +$?)) + outln + else + first_ocsp=true + while read -r line; do + if "$first_ocsp"; then + first_ocsp=false + else + out "$spaces" + fi + out "$line" + if [[ "$expfinding" != "expired" ]]; then + check_revocation_ocsp "$line" "" "cert_ocspRevoked${json_postfix}" + ret=$((ret +$?)) + fi + outln + done <<< "$ocsp_uri" + fi + fileout "${jsonID}${json_postfix}" "INFO" "$ocsp_uri" + fi + if [[ -z "$ocsp_uri" ]] && [[ -z "$crl" ]]; then + out "$spaces" + pr_svrty_high "NOT ok --" + outln " neither CRL nor OCSP URI provided" + fileout "cert_revocation${json_postfix}" "HIGH" "Neither CRL nor OCSP URI provided" + fi + + out "$indent"; pr_bold " OCSP stapling " + jsonID="OCSP_stapling" + if grep -a "OCSP response" <<< "$ocsp_response" | grep -q "no 
response sent" ; then + if [[ -n "$ocsp_uri" ]]; then + pr_svrty_low "not offered" + fileout "${jsonID}${json_postfix}" "LOW" "not offered" + else + out "not offered" + fileout "${jsonID}${json_postfix}" "INFO" "not offered" + fi + else + if grep -a "OCSP Response Status" <<< "$ocsp_response_status" | grep -q successful; then + pr_svrty_good "offered" + fileout "${jsonID}${json_postfix}" "OK" "offered" + provides_stapling=true + check_revocation_ocsp "" "$ocsp_response_binary" "cert_ocspRevoked${json_postfix}" + elif [[ "$ocsp_response" =~ Responder\ Error: ]]; then + response="$(awk '/Responder Error:/ { print $3 }' <<< "$ocsp_response")" + pr_warning "stapled OCSP response contained an error response from OCSP responder: $response" + fileout "${jsonID}${json_postfix}" "WARN" "stapled OCSP response contained an error response from OCSP responder: $response" + else + if $GOST_STATUS_PROBLEM; then + pr_warning "(GOST servers make problems here, sorry)" + fileout "${jsonID}${json_postfix}" "WARN" "(The GOST server made a problem here, sorry)" + ((ret++)) + else + out "(response status unknown)" + fileout "${jsonID}${json_postfix}" "OK" " not sure what's going on here, '$ocsp_response'" + debugme grep -a -A20 -B2 "OCSP response" <<<"$ocsp_response" + ((ret++)) + fi + fi + fi + outln + + out "$indent"; pr_bold " OCSP must staple extension "; + must_staple "$json_postfix" "$provides_stapling" "$cert_txt" + + out "$indent"; pr_bold " DNS CAA RR"; out " (experimental) " + jsonID="DNS_CAArecord" + caa_node="$NODE" + caa="" + while ( [[ -z "$caa" ]] && [[ ! -z "$caa_node" ]] ); do + caa="$(get_caa_rr_record $caa_node)" + [[ $caa_node =~ '.'$ ]] || caa_node+="." + caa_node=${caa_node#*.} + done + if [[ -n "$caa" ]]; then + pr_svrty_good "available"; out " - please check for match with \"Issuer\" above" + if [[ $(count_lines "$caa") -eq 1 ]]; then + out ": " + else + outln; out "$spaces" + fi + while read caa; do + if [[ -n "$caa" ]]; then + all_caa+="$caa, " + fi + done <<< "$caa" + all_caa=${all_caa%, } # strip trailing comma + pr_italic "$(out_row_aligned_max_width "$all_caa" "$indent " $TERM_WIDTH)" + fileout "${jsonID}${json_postfix}" "OK" "$all_caa" + elif [[ -n "$NODNS" ]]; then + out "(instructed to minimize DNS queries)" + fileout "${jsonID}${json_postfix}" "INFO" "check skipped as instructed" + else + pr_svrty_low "not offered" + fileout "${jsonID}${json_postfix}" "LOW" "--" + fi + outln + + out "$indent"; pr_bold " Certificate Transparency "; + jsonID="certificate_transparency" + if [[ "$ct" =~ extension ]]; then + pr_svrty_good "yes"; outln " ($ct)" + fileout "${jsonID}${json_postfix}" "OK" "yes ($ct)" + else + outln "$ct" + fileout "${jsonID}${json_postfix}" "INFO" "$ct" + fi + outln + return $ret +} + +run_server_defaults() { + local ciph newhostcert sni + local match_found + local sessticket_lifetime_hint="" sessticket_proto="" lifetime unit + local -i i n + local -i certs_found=0 + local -i ret=0 + local -a previous_hostcert previous_hostcert_txt previous_hostcert_type + local -a previous_hostcert_issuer previous_intermediates previous_ordering_problem keysize cipher + local -a ocsp_response_binary ocsp_response ocsp_response_status sni_used tls_version ct + local -a ciphers_to_test certificate_type + local -a -i success + local cn_nosni cn_sni sans_nosni sans_sni san tls_extensions + local using_sockets=true + + "$SSL_NATIVE" && using_sockets=false + + # Try each public key type once: + # ciphers_to_test[1]: cipher suites using certificates with RSA signature public keys + # 
ciphers_to_test[2]: cipher suites using certificates with RSA key encipherment public keys + # ciphers_to_test[3]: cipher suites using certificates with DSA signature public keys + # ciphers_to_test[4]: cipher suites using certificates with DH key agreement public keys + # ciphers_to_test[5]: cipher suites using certificates with ECDH key agreement public keys + # ciphers_to_test[6]: cipher suites using certificates with ECDSA signature public keys + # ciphers_to_test[7]: cipher suites using certificates with GOST R 34.10 (either 2001 or 94) public keys + ciphers_to_test[1]="aRSA:eRSA" + ciphers_to_test[2]="" + ciphers_to_test[3]="aDSS:aDH:aECDH:aECDSA:aGOST" + ciphers_to_test[4]="" + ciphers_to_test[5]="" + ciphers_to_test[6]="" + ciphers_to_test[7]="" + ciphers_to_test[8]="tls1_3_RSA" + ciphers_to_test[9]="tls1_3_ECDSA" + certificate_type[1]="" ; certificate_type[2]="" + certificate_type[3]=""; certificate_type[4]="" + certificate_type[5]="" ; certificate_type[6]="" + certificate_type[7]="" ; certificate_type[8]="RSASig" + certificate_type[9]="ECDSA" + + for (( n=1; n <= 16 ; n++ )); do + # Some servers use a different certificate if the ClientHello + # specifies TLSv1.1 and doesn't include a server name extension. + # So, for each public key type for which a certificate was found, + # try again, but only with TLSv1.1 and without SNI. + if [[ $n -ne 1 ]] && [[ "$OPTIMAL_PROTO" == -ssl2 ]]; then + ciphers_to_test[n]="" + elif [[ $n -ge 10 ]]; then + ciphers_to_test[n]="" + [[ ${success[n-9]} -eq 0 ]] && [[ $(has_server_protocol "tls1_1") -ne 1 ]] && \ + ciphers_to_test[n]="${ciphers_to_test[n-9]}" && certificate_type[n]="${certificate_type[n-9]}" + fi + + if [[ -n "${ciphers_to_test[n]}" ]]; then + if [[ $n -ge 10 ]]; then + sni="$SNI" + SNI="" + get_server_certificate "${ciphers_to_test[n]}" "tls1_1" + success[n]=$? + SNI="$sni" + else + get_server_certificate "${ciphers_to_test[n]}" + success[n]=$? + fi + if [[ ${success[n]} -eq 0 ]] && [[ -s "$HOSTCERT" ]]; then + [[ $n -ge 10 ]] && [[ ! 
-e $HOSTCERT.nosni ]] && cp $HOSTCERT $HOSTCERT.nosni + cp "$TEMPDIR/$NODEIP.get_server_certificate.txt" $TMPFILE + >$ERRFILE + if [[ -z "$sessticket_lifetime_hint" ]]; then + sessticket_lifetime_hint=$(awk '/session ticket life/ { if (!found) print; found=1 }' $TMPFILE) + sessticket_proto="$(get_protocol "$TMPFILE")" + fi + + if [[ $n -le 7 ]]; then + ciph="$(get_cipher $TMPFILE)" + if [[ "$ciph" != TLS_* ]] && [[ "$ciph" != SSL_* ]]; then + ciph="$(openssl2rfc "$ciph")" + fi + if [[ "$ciph" == TLS_DHE_RSA_* ]] || [[ "$ciph" == TLS_ECDHE_RSA_* ]] || [[ "$ciph" == TLS_CECPQ1_RSA_* ]]; then + certificate_type[n]="RSASig" + if [[ -z "${ciphers_to_test[n+1]}" ]]; then + ciphers_to_test[n+1]="${ciphers_to_test[n]/aRSA/}" + ciphers_to_test[n+1]="${ciphers_to_test[n+1]/::/:}" + [[ "${ciphers_to_test[n+1]:0:1}" == : ]] && ciphers_to_test[n+1]="${ciphers_to_test[n+1]:1}" + fi + ciphers_to_test[n]="aRSA" + elif [[ "$ciph" == TLS_RSA_* ]] || [[ "$ciph" == SSL_* ]] || [[ "$ciph" == TLS_GOST*_RSA_* ]]; then + certificate_type[n]="RSAKMK" + if [[ -z "${ciphers_to_test[n+1]}" ]]; then + ciphers_to_test[n+1]="${ciphers_to_test[n]/eRSA/}" + ciphers_to_test[n+1]="${ciphers_to_test[n+1]/::/:}" + [[ "${ciphers_to_test[n+1]:0:1}" == : ]] && ciphers_to_test[n+1]="${ciphers_to_test[n+1]:1}" + fi + ciphers_to_test[n]="eRSA" + elif [[ "$ciph" == TLS_DHE_DSS_* ]]; then + certificate_type[n]="DSA" + if [[ -z "${ciphers_to_test[n+1]}" ]]; then + ciphers_to_test[n+1]="${ciphers_to_test[n]/aDSS/}" + ciphers_to_test[n+1]="${ciphers_to_test[n+1]/::/:}" + [[ "${ciphers_to_test[n+1]:0:1}" == : ]] && ciphers_to_test[n+1]="${ciphers_to_test[n+1]:1}" + fi + ciphers_to_test[n]="aDSS" + elif [[ "$ciph" == TLS_DH_* ]]; then + certificate_type[n]="DH" + if [[ -z "${ciphers_to_test[n+1]}" ]]; then + ciphers_to_test[n+1]="${ciphers_to_test[n]/aDH/}" + ciphers_to_test[n+1]="${ciphers_to_test[n+1]/::/:}" + [[ "${ciphers_to_test[n+1]:0:1}" == : ]] && ciphers_to_test[n+1]="${ciphers_to_test[n+1]:1}" + fi + ciphers_to_test[n]="aDH" + elif [[ "$ciph" == TLS_ECDH_* ]]; then + certificate_type[n]="ECDH" + if [[ -z "${ciphers_to_test[n+1]}" ]]; then + ciphers_to_test[n+1]="${ciphers_to_test[n]/aECDH/}" + ciphers_to_test[n+1]="${ciphers_to_test[n+1]/::/:}" + [[ "${ciphers_to_test[n+1]:0:1}" == : ]] && ciphers_to_test[n+1]="${ciphers_to_test[n+1]:1}" + fi + ciphers_to_test[n]="aECDH" + elif [[ "$ciph" == TLS_ECDHE_ECDSA_* ]] || [[ "$ciph" == TLS_CECPQ1_ECDSA_* ]]; then + certificate_type[n]="ECDSA" + if [[ -z "${ciphers_to_test[n+1]}" ]]; then + ciphers_to_test[n+1]="${ciphers_to_test[n]/aECDSA/}" + ciphers_to_test[n+1]="${ciphers_to_test[n+1]/::/:}" + [[ "${ciphers_to_test[n+1]:0:1}" == : ]] && ciphers_to_test[n+1]="${ciphers_to_test[n+1]:1}" + fi + ciphers_to_test[n]="aECDSA" + elif [[ "$ciph" == TLS_GOST* ]]; then + certificate_type[n]="GOST" + if [[ -z "${ciphers_to_test[n+1]}" ]]; then + ciphers_to_test[n+1]="${ciphers_to_test[n]/aGOST/}" + ciphers_to_test[n+1]="${ciphers_to_test[n+1]/::/:}" + [[ "${ciphers_to_test[n+1]:0:1}" == : ]] && ciphers_to_test[n+1]="${ciphers_to_test[n+1]:1}" + fi + ciphers_to_test[n]="aGOST" + fi + fi + # check whether the host's certificate has been seen before + match_found=false + i=1 + newhostcert=$(cat $HOSTCERT) + while [[ $i -le $certs_found ]]; do + if [[ "$newhostcert" == "${previous_hostcert[i]}" ]]; then + match_found=true + break; + fi + i=$((i + 1)) + done + if ! "$match_found" && [[ $n -ge 10 ]] && [[ $certs_found -ne 0 ]]; then + # A new certificate was found using TLSv1.1 without SNI. 
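+ # Background, as an illustration (hypothetical names, not from the scan): servers answering
+ # a ClientHello without SNI frequently return a default/fallback certificate. Example of the
+ # matching idea applied below:
+ #   SNI certificate:     CN=www.example.com, SANs: www.example.com example.com
+ #   no-SNI certificate:  CN=www.example.com, SANs: example.com mail.example.com
+ # The CNs are equal and the SAN lists share "example.com", so the no-SNI certificate would be
+ # considered a match for this host and shown in the output.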
+ # Check to see if the new certificate should be displayed. + # It should be displayed if it is either a match for the + # $NODE being tested or if it has the same subject + # (CN and SAN) as other certificates for this host. + compare_server_name_to_cert "$HOSTCERT" + [[ $? -ne 0 ]] && success[n]=0 || success[n]=1 + + if [[ ${success[n]} -ne 0 ]]; then + cn_nosni="$(toupper "$(get_cn_from_cert $HOSTCERT)")" + sans_nosni="$(toupper "$(get_san_dns_from_cert "$HOSTCERT")")" + + echo "${previous_hostcert[1]}" > $HOSTCERT + cn_sni="$(toupper "$(get_cn_from_cert $HOSTCERT)")" + + # FIXME: Not sure what the matching rule should be. At + # the moment, the no SNI certificate is considered a + # match if the CNs are the same and the SANs (if + # present) contain at least one DNS name in common. + if [[ "$cn_nosni" == "$cn_sni" ]]; then + sans_sni="$(toupper "$(get_san_dns_from_cert "$HOSTCERT")")" + if [[ "$sans_nosni" == "$sans_sni" ]]; then + success[n]=0 + else + while read -r san; do + [[ -n "$san" ]] && [[ " $sans_sni " =~ " $san " ]] && success[n]=0 && break + done <<< "$sans_nosni" + fi + fi + fi + # If the certificate found for TLSv1.1 w/o SNI appears to + # be for a different host, then set match_found to true so + # that the new certificate will not be included in the output. + [[ ${success[n]} -ne 0 ]] && match_found=true + fi + if ! "$match_found"; then + certs_found=$(( certs_found + 1)) + cipher[certs_found]=${ciphers_to_test[n]} + keysize[certs_found]=$(awk '/Server public key/ { print $(NF-1) }' $TMPFILE) + # If an OCSP response was sent, then get the full + # response so that certificate_info() can determine + # whether it includes a certificate transparency extension. + ocsp_response_binary[certs_found]="$STAPLED_OCSP_RESPONSE" + if grep -a "OCSP response:" $TMPFILE | grep -q "no response sent"; then + ocsp_response[certs_found]="$(grep -a "OCSP response" $TMPFILE)" + else + ocsp_response[certs_found]="$(awk -v n=2 '/OCSP response:/ {start=1; inc=2} /======================================/ { if (start) {inc--} } inc' $TMPFILE)" + fi + ocsp_response_status[certs_found]=$(grep -a "OCSP Response Status" $TMPFILE) + previous_hostcert[certs_found]=$newhostcert + previous_hostcert_txt[certs_found]="$($OPENSSL x509 -noout -text 2>>$ERRFILE <<< "$newhostcert")" + previous_intermediates[certs_found]=$(cat $TEMPDIR/intermediatecerts.pem) + previous_hostcert_issuer[certs_found]="" + [[ -n "${previous_intermediates[certs_found]}" ]] && [[ -r $TEMPDIR/hostcert_issuer.pem ]] && \ + previous_hostcert_issuer[certs_found]=$(cat $TEMPDIR/hostcert_issuer.pem) + previous_ordering_problem[certs_found]=$CERTIFICATE_LIST_ORDERING_PROBLEM + [[ $n -ge 10 ]] && sni_used[certs_found]="" || sni_used[certs_found]="$SNI" + tls_version[certs_found]="$DETECTED_TLS_VERSION" + previous_hostcert_type[certs_found]=" ${certificate_type[n]}" + if [[ $DEBUG -ge 1 ]]; then + echo "${previous_hostcert[certs_found]}" > $TEMPDIR/host_certificate_$certs_found.pem + echo "${previous_hostcert_txt[certs_found]}" > $TEMPDIR/host_certificate_$certs_found.txt + fi + else + previous_hostcert_type[i]+=" ${certificate_type[n]}" + fi + fi + fi + done + + determine_tls_extensions + + if [[ $? 
-eq 0 ]] && [[ "$OPTIMAL_PROTO" != -ssl2 ]]; then + cp "$TEMPDIR/$NODEIP.determine_tls_extensions.txt" $TMPFILE + >$ERRFILE + if [[ -z "$sessticket_lifetime_hint" ]]; then + sessticket_lifetime_hint=$(awk '/session ticket lifetime/ { if (!found) print; found=1 }' $TMPFILE) + sessticket_proto="$(get_protocol "$TMPFILE")" + fi + fi + if "$using_sockets" && ! "$TLS13_ONLY" && [[ -z "$sessticket_lifetime_hint" ]] && [[ "$OPTIMAL_PROTO" != -ssl2 ]]; then + if "$HAS_TLS13" && ( [[ -z "$OPTIMAL_PROTO" ]] || [[ "$OPTIMAL_PROTO" == -tls1_3 ]] ) ; then + # If a session ticket were sent in response to a TLSv1.3 ClientHello, then a session ticket + # would have been found by get_server_certificate(). So, try again with a TLSv1.2 ClientHello. + $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -no_tls1_3 -connect $NODEIP:$PORT $PROXY $SNI") $ERRFILE >$TMPFILE + else + $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS "$OPTIMAL_PROTO" -connect $NODEIP:$PORT $PROXY $SNI") $ERRFILE >$TMPFILE + fi + if sclient_connect_successful $? $TMPFILE; then + sessticket_lifetime_hint=$(awk '/session ticket lifetime/ { if (!found) print; found=1 }' $TMPFILE) + sessticket_proto="$(get_protocol "$TMPFILE")" + fi + fi + [[ -z "$sessticket_lifetime_hint" ]] && TLS_TICKETS=false || TLS_TICKETS=true + + debugme echo "# certificates found $certs_found" + # Now that all of the server's certificates have been found, determine for + # each certificate whether certificate transparency information is provided. + for (( i=1; i <= certs_found; i++ )); do + ct[i]="$(certificate_transparency "${previous_hostcert_txt[i]}" "${ocsp_response[i]}" "$certs_found" "${cipher[i]}" "${sni_used[i]}" "${tls_version[i]}")" + # If certificate_transparency() called tls_sockets() and found a "signed certificate timestamps" extension, + # then add it to $TLS_EXTENSIONS, since it may not have been found by determine_tls_extensions(). + [[ $certs_found -gt 1 ]] && [[ "${ct[i]}" == TLS\ extension ]] && extract_new_tls_extensions "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" + done + + outln + pr_headlineln " Testing server defaults (Server Hello) " + outln + + pr_bold " TLS extensions (standard) " + if [[ -z "$TLS_EXTENSIONS" ]]; then + outln "(none)" + fileout "TLS_extensions" "INFO" "(none)" + else +#FIXME: we rather want to have the chance to print each ext in italics or another format. +# Atm is a string of quoted strings -- that needs to be fixed at the root then + # out_row_aligned_max_width() places line breaks at space characters. + # So, in order to prevent the text for an extension from being broken + # across lines, temporarily replace space characters within the text + # of an extension with "}", and then convert the "}" back to space in + # the output of out_row_aligned_max_width(). + tls_extensions="${TLS_EXTENSIONS// /{}" + tls_extensions="${tls_extensions//\"{\"/\" \"}" + tls_extensions="$(out_row_aligned_max_width "$tls_extensions" " " $TERM_WIDTH)" + tls_extensions="${tls_extensions//{/ }" + outln "$tls_extensions" + fileout "TLS_extensions" "INFO" "$TLS_EXTENSIONS" + fi + + pr_bold " Session Ticket RFC 5077 hint " + jsonID="TLS_session_ticket" + if [[ -z "$sessticket_lifetime_hint" ]]; then + outln "no -- no lifetime advertised" + fileout "${jsonID}" "INFO" "no -- no lifetime advertised" + # it MAY be given a hint of the lifetime of the ticket, see https://tools.ietf.org/html/rfc5077#section-5.6 . 
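+ # (The hint is the ticket_lifetime_hint field of the NewSessionTicket message and is given
+ # in seconds; RFC 5077 defines a value of zero as "lifetime unspecified".)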
+ # Sometimes it just does not -- but it then may also support TLS session tickets reuse + else + lifetime=$(grep -a lifetime <<< "$sessticket_lifetime_hint" | sed 's/[A-Za-z:() ]//g') + unit=$(grep -a lifetime <<< "$sessticket_lifetime_hint" | sed -e 's/^.*'"$lifetime"'//' -e 's/[ ()]//g') + out "$lifetime $unit" + if [[ $((3600 * 24)) -lt $lifetime ]]; then + prln_svrty_low " but: PFS requires session ticket keys to be rotated < daily !" + fileout "$jsonID" "LOW" "valid for $lifetime $unit (>daily)" + else + outln ", session tickets keys seems to be rotated < daily" + fileout "$jsonID" "INFO" "valid for $lifetime $unit only (>$ERRFILE $HOSTCERT.nosni + fi + else + >$HOSTCERT.nosni + fi + mv $HOSTCERT.save $HOSTCERT + SNI="$sni" + else + $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $OPTIMAL_PROTO") 2>>$ERRFILE $HOSTCERT.nosni + fi + elif [[ $certs_found -eq 0 ]] && [[ -s "$HOSTCERT" ]]; then + outln + generic_nonfatal "Client problem, shouldn't happen: Host certificate found but we can't continue with \"server defaults\"." + elif [[ $certs_found -eq 0 ]]; then + outln + if $TLS13_ONLY; then + generic_nonfatal "Client problem: We need openssl supporting TLS 1.3. We can't continue with \"server defaults\" as we cannot retrieve the certificate. " + else + generic_nonfatal "Client problem, No server cerificate could be retrieved. Thus we can't continue with \"server defaults\"." + fi + fi + [[ $DEBUG -ge 1 ]] && [[ -e $HOSTCERT.nosni ]] && $OPENSSL x509 -in $HOSTCERT.nosni -text -noout 2>>$ERRFILE > $HOSTCERT.nosni.txt + + fileout "cert_numbers" "INFO" "$certs_found" + for (( i=1; i <= certs_found; i++ )); do + echo "${previous_hostcert[i]}" > $HOSTCERT + echo "${previous_intermediates[i]}" > $TEMPDIR/intermediatecerts.pem + echo "${previous_hostcert_issuer[i]}" > $TEMPDIR/hostcert_issuer.pem + certificate_info "$i" "$certs_found" "${previous_hostcert_txt[i]}" \ + "${cipher[i]}" "${keysize[i]}" "${previous_hostcert_type[i]}" \ + "${ocsp_response_binary[i]}" "${ocsp_response[i]}" \ + "${ocsp_response_status[i]}" "${sni_used[i]}" "${ct[i]}" \ + "${previous_ordering_problem[i]}" + [[ $? 
-ne 0 ]] && ((ret++)) + done + return $ret +} + +get_session_ticket_lifetime_from_serverhello() { + awk '/session ticket.*lifetime/ { print $(NF-1) "$1" }' +} + +get_san_dns_from_cert() { + echo "$($OPENSSL x509 -in "$1" -noout -text 2>>$ERRFILE | \ + grep -A2 "Subject Alternative Name" | tr ',' '\n' | grep "DNS:" | \ + sed -e 's/DNS://g' -e 's/ //g')" +} + + +run_pfs() { + local -i sclient_success + local pfs_offered=false ecdhe_offered=false ffdhe_offered=false + local pfs_tls13_offered=false + local protos_to_try proto hexc dash pfs_cipher sslvers auth mac export curve dhlen + local -a hexcode normalized_hexcode ciph rfc_ciph kx enc ciphers_found sigalg ossl_supported + # generated from 'kEECDH:kEDH:!aNULL:!eNULL:!DES:!3DES:!RC4' with openssl 1.0.2i and openssl 1.1.0 + local pfs_cipher_list="DHE-DSS-AES128-GCM-SHA256:DHE-DSS-AES128-SHA256:DHE-DSS-AES128-SHA:DHE-DSS-AES256-GCM-SHA384:DHE-DSS-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-DSS-CAMELLIA128-SHA256:DHE-DSS-CAMELLIA128-SHA:DHE-DSS-CAMELLIA256-SHA256:DHE-DSS-CAMELLIA256-SHA:DHE-DSS-SEED-SHA:DHE-RSA-AES128-CCM8:DHE-RSA-AES128-CCM:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-CCM8:DHE-RSA-AES256-CCM:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:DHE-RSA-CAMELLIA128-SHA256:DHE-RSA-CAMELLIA128-SHA:DHE-RSA-CAMELLIA256-SHA256:DHE-RSA-CAMELLIA256-SHA:DHE-RSA-CHACHA20-POLY1305-OLD:DHE-RSA-CHACHA20-POLY1305:DHE-RSA-SEED-SHA:ECDHE-ECDSA-AES128-CCM8:ECDHE-ECDSA-AES128-CCM:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-ECDSA-AES256-CCM8:ECDHE-ECDSA-AES256-CCM:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-ECDSA-CAMELLIA128-SHA256:ECDHE-ECDSA-CAMELLIA256-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305-OLD:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-RSA-CAMELLIA128-SHA256:ECDHE-RSA-CAMELLIA256-SHA384:ECDHE-RSA-CHACHA20-POLY1305-OLD:ECDHE-RSA-CHACHA20-POLY1305" + local pfs_hex_cipher_list="" ciphers_to_test tls13_ciphers_to_test + local ecdhe_cipher_list="" tls13_cipher_list="" ecdhe_cipher_list_hex="" ffdhe_cipher_list_hex="" + local curves_hex=("00,01" "00,02" "00,03" "00,04" "00,05" "00,06" "00,07" "00,08" "00,09" "00,0a" "00,0b" "00,0c" "00,0d" "00,0e" "00,0f" "00,10" "00,11" "00,12" "00,13" "00,14" "00,15" "00,16" "00,17" "00,18" "00,19" "00,1a" "00,1b" "00,1c" "00,1d" "00,1e") + local -a curves_ossl=("sect163k1" "sect163r1" "sect163r2" "sect193r1" "sect193r2" "sect233k1" "sect233r1" "sect239k1" "sect283k1" "sect283r1" "sect409k1" "sect409r1" "sect571k1" "sect571r1" "secp160k1" "secp160r1" "secp160r2" "secp192k1" "prime192v1" "secp224k1" "secp224r1" "secp256k1" "prime256v1" "secp384r1" "secp521r1" "brainpoolP256r1" "brainpoolP384r1" "brainpoolP512r1" "X25519" "X448") + local -a curves_ossl_output=("K-163" "sect163r1" "B-163" "sect193r1" "sect193r2" "K-233" "B-233" "sect239k1" "K-283" "B-283" "K-409" "B-409" "K-571" "B-571" "secp160k1" "secp160r1" "secp160r2" "secp192k1" "P-192" "secp224k1" "P-224" "secp256k1" "P-256" "P-384" "P-521" "brainpoolP256r1" "brainpoolP384r1" "brainpoolP512r1" "X25519" "X448") + local -ai curves_bits=(163 162 163 193 193 232 233 238 281 282 407 409 570 570 161 161 161 192 192 225 224 256 256 384 521 256 384 512 253 448) + # Many curves have been deprecated, and RFC 8446, Appendix B.3.1.4, states + # that these curves MUST NOT be offered in a TLS 1.3 ClientHello. 
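+ # The curves_deprecated array below is index-aligned with curves_ossl/curves_ossl_output:
+ # only secp256r1 (prime256v1), secp384r1, secp521r1, X25519 and X448 remain allowed in a
+ # TLS 1.3 ClientHello, so those entries are "false" and all other curves are marked "true".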
+ local -a curves_deprecated=("true" "true" "true" "true" "true" "true" "true" "true" "true" "true" "true" "true" "true" "true" "true" "true" "true" "true" "true" "true" "true" "true" "false" "false" "false" "true" "true" "true" "false" "false") + local -a ffdhe_groups_hex=("01,00" "01,01" "01,02" "01,03" "01,04") + local -a ffdhe_groups_output=("ffdhe2048" "ffdhe3072" "ffdhe4096" "ffdhe6144" "ffdhe8192") + local -a supported_curve + local -i nr_supported_ciphers=0 nr_curves=0 nr_ossl_curves=0 i j low high + local pfs_ciphers curves_offered="" curves_to_test temp + local len1 len2 curve_found + local key_bitstring quality_str + local -i len_dh_p quality + local has_dh_bits="$HAS_DH_BITS" + local using_sockets=true + local jsonID="PFS" + + "$SSL_NATIVE" && using_sockets=false + "$FAST" && using_sockets=false + [[ $TLS_NR_CIPHERS == 0 ]] && using_sockets=false + + outln + pr_headline " Testing robust (perfect) forward secrecy"; prln_underline ", (P)FS -- omitting Null Authentication/Encryption, 3DES, RC4 " + if ! "$using_sockets"; then + [[ $TLS_NR_CIPHERS == 0 ]] && ! "$SSL_NATIVE" && ! "$FAST" && pr_warning " Cipher mapping not available, doing a fallback to openssl" + if ! "$HAS_DH_BITS" && "$WIDE"; then + [[ $TLS_NR_CIPHERS == 0 ]] && ! "$SSL_NATIVE" && ! "$FAST" && out "." + pr_warning " (Your $OPENSSL cannot show DH/ECDH bits)" + fi + outln + fi + + if "$using_sockets" || [[ $OSSL_VER_MAJOR -lt 1 ]]; then + for (( i=0; i < TLS_NR_CIPHERS; i++ )); do + pfs_cipher="${TLS_CIPHER_RFC_NAME[i]}" + hexc="${TLS_CIPHER_HEXCODE[i]}" + if ( [[ "$pfs_cipher" == "TLS_DHE_"* ]] || [[ "$pfs_cipher" == "TLS_ECDHE_"* ]] || [[ "${hexc:2:2}" == "13" ]] ) && \ + [[ ! "$pfs_cipher" =~ NULL ]] && [[ ! "$pfs_cipher" =~ DES ]] && [[ ! "$pfs_cipher" =~ RC4 ]] && \ + [[ ! "$pfs_cipher" =~ PSK ]] && ( "$using_sockets" || "${TLS_CIPHER_OSSL_SUPPORTED[i]}" ); then + pfs_hex_cipher_list+=", ${hexc:2:2},${hexc:7:2}" + ciph[nr_supported_ciphers]="${TLS_CIPHER_OSSL_NAME[i]}" + rfc_ciph[nr_supported_ciphers]="${TLS_CIPHER_RFC_NAME[i]}" + kx[nr_supported_ciphers]="${TLS_CIPHER_KX[i]}" + enc[nr_supported_ciphers]="${TLS_CIPHER_ENC[i]}" + ciphers_found[nr_supported_ciphers]=false + sigalg[nr_supported_ciphers]="" + ossl_supported[nr_supported_ciphers]="${TLS_CIPHER_OSSL_SUPPORTED[i]}" + hexcode[nr_supported_ciphers]="${hexc:2:2},${hexc:7:2}" + if [[ "${hexc:2:2}" == 00 ]]; then + normalized_hexcode[nr_supported_ciphers]="x${hexc:7:2}" + else + normalized_hexcode[nr_supported_ciphers]="x${hexc:2:2}${hexc:7:2}" + fi + "$using_sockets" && ! "$has_dh_bits" && "$WIDE" && ossl_supported[nr_supported_ciphers]=false + nr_supported_ciphers+=1 + fi + done + else + while read -r hexc dash ciph[nr_supported_ciphers] sslvers kx[nr_supported_ciphers] auth enc[nr_supported_ciphers] mac export; do + ciphers_found[nr_supported_ciphers]=false + if [[ "${hexc:2:2}" == 00 ]]; then + normalized_hexcode[nr_supported_ciphers]="x${hexc:7:2}" + else + normalized_hexcode[nr_supported_ciphers]="x${hexc:2:2}${hexc:7:2}" + fi + sigalg[nr_supported_ciphers]="" + ossl_supported[nr_supported_ciphers]=true + nr_supported_ciphers+=1 + done < <(actually_supported_osslciphers "$pfs_cipher_list" "ALL" "-V") + fi + export="" + + if [[ $(has_server_protocol "tls1_3") -eq 0 ]]; then + # All TLSv1.3 cipher suites offer robust PFS. + sclient_success=0 + elif "$using_sockets"; then + tls_sockets "04" "${pfs_hex_cipher_list:2}, 00,ff" + sclient_success=$? 
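+ # (Return code 2 from tls_sockets() -- presumably a handshake that succeeded, just not with
+ # the exact protocol version asked for -- is folded into success on the next line, since any
+ # successful handshake with these ciphers is good enough for the PFS verdict.)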
+ [[ $sclient_success -eq 2 ]] && sclient_success=0 + else + debugme echo $nr_supported_ciphers + debugme echo $(actually_supported_osslciphers $pfs_cipher_list "ALL") + if [[ "$nr_supported_ciphers" -le "$CLIENT_MIN_PFS" ]]; then + outln + prln_local_problem "You only have $nr_supported_ciphers PFS ciphers on the client side " + fileout "$jsonID" "WARN" "tests skipped as you only have $nr_supported_ciphers PFS ciphers on the client site. ($CLIENT_MIN_PFS are required)" + return 1 + fi + $OPENSSL s_client $(s_client_options "-cipher $pfs_cipher_list -ciphersuites "ALL" $STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $SNI") >$TMPFILE 2>$ERRFILE $TMPFILE $TMPFILE $ERRFILE >$TMPFILE + [[ $? -ne 0 ]] && ret=1 + tmpstr="$(grep -a '^Protocols' $TMPFILE | sed 's/Protocols.*: //')" + if [[ -z "$tmpstr" ]] || [[ "$tmpstr" == " " ]]; then + outln "not offered" + fileout "$jsonID" "INFO" "not offered" + else + # now comes a strange thing: "Protocols advertised by server:" is empty but connection succeeded + if [[ "$tmpstr" =~ [h2|spdy|http] ]]; then + out "$tmpstr" + outln " (advertised)" + fileout "$jsonID" "INFO" "offered with $tmpstr (advertised)" + else + prln_cyan "please check manually, server response was ambiguous ..." + fileout "$jsonID" "INFO" "please check manually, server response was ambiguous ..." + ((ret++)) + fi + fi + # btw: nmap can do that too https://nmap.org/nsedoc/scripts/tls-nextprotoneg.html + # nmap --script=tls-nextprotoneg #NODE -p $PORT is your friend if your openssl doesn't want to test this + tmpfile_handle ${FUNCNAME[0]}.txt + return $ret +} + + +run_alpn() { + local tmpstr alpn_extn len + local -i ret=0 + local has_alpn_proto=false + local alpn_finding="" + local jsonID="ALPN" + + [[ -n "$STARTTLS" ]] && return 0 + pr_bold " ALPN/HTTP2 " + if ! alpn_pre; then + outln + return 0 + fi + for proto in $ALPN_PROTOs; do + # for some reason OpenSSL doesn't list the advertised protocols, so instead try common protocols + if "$HAS_ALPN"; then + $OPENSSL s_client $(s_client_options "-connect $NODEIP:$PORT $BUGS $SNI -alpn $proto") $ERRFILE >$TMPFILE + else + alpn_extn="$(printf "%02x" ${#proto}),$(string_to_asciihex "$proto")" + len="$(printf "%04x" $((${#proto}+1)))" + alpn_extn="${len:0:2},${len:2:2},$alpn_extn" + len="$(printf "%04x" $((${#proto}+3)))" + alpn_extn="00,10,${len:0:2},${len:2:2},$alpn_extn" + tls_sockets "03" "$TLS12_CIPHER" "all+" "$alpn_extn" + if [[ -r "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" ]]; then + cp "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" $TMPFILE + else + echo "" > $TMPFILE + fi + fi + tmpstr="$(awk -F':' '/^ALPN protocol*:/ { print $2 }' $TMPFILE)" + if [[ "$tmpstr" == *"$proto" ]]; then + if ! 
$has_alpn_proto; then + has_alpn_proto=true + else + out ", " + fi + # only h2 is what browser need to use HTTP/2.0 and brings a security,privacy and performance benefit + if [[ "$proto" == "h2" ]]; then + pr_svrty_good "$proto" + fileout "${jsonID}_HTTP2" "OK" "$proto" + else + out "$proto" + alpn_finding+="$proto" + fi + fi + done + if $has_alpn_proto; then + outln " (offered)" + # if h2 is not the only protocol: + [[ -n "$alpn_finding" ]] && fileout "$jsonID" "INFO" "$alpn_finding" + else + outln "not offered" + fileout "$jsonID" "INFO" "not offered" + fi + tmpfile_handle ${FUNCNAME[0]}.txt + return $ret +} + +# arg1: send string +# arg2: success string: an egrep pattern +# arg3: number of loops we should read from the buffer (optional, otherwise STARTTLS_SLEEP) +starttls_io() { + local nr_waits=$STARTTLS_SLEEP + local buffer="" + local -i i + + [[ -n "$3" ]] && waitsleep=$3 + [[ -z "$2" ]] && echo "FIXME $((LINENO))" + + # If there's a sending part it's IO. Postgres sends via socket and replies via + # strings "S". So there's no I part of IO ;-) + if [[ -n "$1" ]]; then + debugme echo -en "C: $1" + echo -en "$1" >&5 + fi + + # This seems a bit dangerous but works. No blockings yet. "if=nonblock" doesn't work on BSDs + buffer="$(dd bs=512 count=1 <&5 2>/dev/null)" + + for ((i=1; i < $nr_waits; i++ )); do + [[ "$DEBUG" -ge 2 ]] && echo -en "\nS: " && echo $buffer + if [[ "$buffer" =~ $2 ]]; then + debugme echo " ---> reply matched \"$2\"" + # the fd sometimes still seem to contain chars which confuses the following TLS handshake, trying to empty: + # dd of=/dev/null bs=512 count=1 <&5 2>/dev/null + return 0 + else + # no match yet, more reading from fd helps. + buffer+=$(dd bs=512 count=1 <&5 2>/dev/null) + fi + done + return 1 +} + + +# Line-based send with newline characters appended (arg2 empty) +# Stream-based send: arg2: +starttls_just_send(){ + if [[ -z "$2" ]] ; then + debugme echo -e "C: $1 plus lf" + echo -ne "$1\r\n" >&5 + else + debugme echo -e "C: $1" + echo -ne "$1" >&5 + fi + return $? +} + +# arg1: (optional): wait time +starttls_just_read(){ + local waitsleep=$STARTTLS_SLEEP + [[ -n "$1" ]] && waitsleep=$1 + if [[ "$DEBUG" -ge 2 ]]; then + echo "=== just read banner ===" + cat <&5 & + else + dd of=/dev/null count=8 <&5 2>/dev/null & + fi + wait_kill $! $waitsleep + return 0 +} + +starttls_full_read(){ + local starttls_read_data=() + local one_line="" + local ret=0 + local cont_pattern="$1" + local end_pattern="$2" + local ret_found=0 + + debugme echo "=== reading banner ... ===" + if [[ $# -ge 3 ]]; then + debugme echo "=== we'll have to search for \"$3\" pattern ===" + ret_found=3 + fi + + local oldIFS="$IFS" + IFS='' + while read -r -t $STARTTLS_SLEEP one_line; ret=$?; (exit $ret); do + debugme echo "S: ${one_line}" + if [[ $# -ge 3 ]]; then + if [[ ${one_line} =~ $3 ]]; then + ret_found=0 + debugme echo "^^^^^^^ that's what we were looking for ===" + fi + fi + starttls_read_data+=("${one_line}") + if [[ $DEBUG -ge 4 ]]; then + echo "one_line: ${one_line}" + echo "end_pattern: ${end_pattern}" + echo "cont_pattern: ${cont_pattern}" + fi + if [[ ${one_line} =~ ${end_pattern} ]]; then + debugme echo "=== full read finished ===" + IFS="${oldIFS}" + return ${ret_found} + fi + if [[ ! 
${one_line} =~ ${cont_pattern} ]]; then + debugme echo "=== full read syntax error, expected regex pattern ${cont_pattern} (cont) or ${end_pattern} (end) ===" + IFS="${oldIFS}" + return 2 + fi + done <&5 + if [[ $DEBUG -ge 2 ]]; then + if [[ $ret -ge 128 ]]; then + echo "=== timeout reading ===" + else + echo "=== full read error (no timeout) ===" + fi + fi + IFS="${oldIFS}" + return $ret +} + +starttls_ftp_dialog() { + debugme echo "=== starting ftp STARTTLS dialog ===" + local reAUTHTLS='^ AUTH TLS' + starttls_full_read '^220-' '^220 ' && debugme echo "received server greeting" && + starttls_just_send 'FEAT' && debugme echo "sent FEAT" && + starttls_full_read '^(211-| )' '^211 ' "${reAUTHTLS}" && debugme echo "received server features and checked STARTTLS availability" && + starttls_just_send 'AUTH TLS' && debugme echo "initiated STARTTLS" && + starttls_full_read '^234-' '^234 ' && debugme echo "received ack for STARTTLS" + local ret=$? + debugme echo "=== finished ftp STARTTLS dialog with ${ret} ===" + return $ret +} + +# argv1: empty: SMTP, "lmtp" : LMTP +# +starttls_smtp_dialog() { + local greet_str="EHLO" + local proto="smtp" + + if [[ "$1" == lmtp ]]; then + proto="lmtp" + greet_str="LHLO" + fi + debugme echo "=== starting $proto STARTTLS dialog ===" + + local re250STARTTLS='^250[ -]STARTTLS' + starttls_full_read '^220-' '^220 ' && debugme echo "received server greeting" && + starttls_just_send "$greet_str testssl.sh" && debugme echo "sent $greet_str" && + starttls_full_read '^250-' '^250 ' "${re250STARTTLS}" && debugme echo "received server capabilities and checked STARTTLS availability" && + starttls_just_send 'STARTTLS' && debugme echo "initiated STARTTLS" && + starttls_full_read '^220-' '^220 ' && debugme echo "received ack for STARTTLS" + local ret=$? + debugme echo "=== finished $proto STARTTLS dialog with ${ret} ===" + return $ret +} + +starttls_pop3_dialog() { + debugme echo "=== starting pop3 STARTTLS dialog ===" + starttls_full_read '^\+OK' '^\+OK' && debugme echo "received server greeting" && + starttls_just_send 'STLS' && debugme echo "initiated STARTTLS" && + starttls_full_read '^\+OK' '^\+OK' && debugme echo "received ack for STARTTLS" + local ret=$? + debugme echo "=== finished pop3 STARTTLS dialog with ${ret} ===" + return $ret +} + +starttls_imap_dialog() { + debugme echo "=== starting imap STARTTLS dialog ===" + local reSTARTTLS='^\* CAPABILITY(( .*)? IMAP4rev1( .*)? STARTTLS(.*)?|( .*)? STARTTLS( .*)? IMAP4rev1(.*)?)$' + starttls_full_read '^\* ' '^\* OK ' && debugme echo "received server greeting" && + starttls_just_send 'a001 CAPABILITY' && debugme echo "sent CAPABILITY" && + starttls_full_read '^\* ' '^a001 OK ' "${reSTARTTLS}" && debugme echo "received server capabilities and checked STARTTLS availability" && + starttls_just_send 'a002 STARTTLS' && debugme echo "initiated STARTTLS" && + starttls_full_read '^\* ' '^a002 OK ' && debugme echo "received ack for STARTTLS" + local ret=$? + debugme echo "=== finished imap STARTTLS dialog with ${ret} ===" + return $ret +} + +starttls_xmpp_dialog() { + debugme echo "=== starting xmpp STARTTLS dialog ===" + [[ -z $XMPP_HOST ]] && XMPP_HOST="$NODE" + + starttls_io "" 'starttls(.*)features' 1 && + starttls_io "" ' 30 38 53 30 31 42 61 64 20 68 61 6e 64 73 68 61 6b 65). + # also there's a banner in the reply "mysql_native_password" + # TODO: We could detect if the server supports STARTTLS via the "Server Capabilities" + # bit field, but we'd need to parse the binary stream, with greater precision than regex. + local ret=$? 
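+ # Note on the dialog helpers above (ftp/smtp/pop3/imap/xmpp/mysql): each starttls_*_dialog()
+ # chains its send/read steps with "&&", so the "$?" captured into ret is 0 only if every step
+ # of the pre-TLS exchange succeeded, and non-zero as soon as an expected server reply is missing.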
+ debugme echo "=== finished mysql STARTTLS dialog with ${ret} ===" + return $ret +} + +# arg1: fd for socket -- which we don't use as it is a hassle and it is not clear whether it works under every bash version +# returns 6 if opening the socket caused a problem, 1 if STARTTLS handshake failed, 0: all ok +# +fd_socket() { + local jabber="" + local proyxline="" + local nodeip="$(tr -d '[]' <<< $NODEIP)" # sockets do not need the square brackets we have of IPv6 addresses + # we just need do it here, that's all! + if [[ -t 5 ]]; then + pr_warning "$PROG_NAME: unable to open a socket because of a tty conflict" + return 6 + fi + if [[ -n "$PROXY" ]]; then + # PROXYNODE works better than PROXYIP on modern versions of squid + if ! exec 5<> /dev/tcp/${PROXYNODE}/${PROXYPORT}; then + outln + pr_warning "$PROG_NAME: unable to open a socket to proxy $PROXYNODE:$PROXYPORT" + return 6 + fi + if "$DNS_VIA_PROXY"; then + printf -- "%b" "CONNECT $NODE:$PORT HTTP/1.0\n\n" >&5 + else + printf -- "%b" "CONNECT $nodeip:$PORT HTTP/1.0\n\n" >&5 + fi + while true; do + read -t $PROXY_WAIT -r proyxline <&5 + if [[ $? -ge 128 ]]; then + pr_warning "Proxy timed out. Unable to CONNECT via proxy. " + close_socket + return 6 + elif [[ "${proyxline%/*}" == HTTP ]]; then + proyxline=${proyxline#* } + if [[ "${proyxline%% *}" != 200 ]]; then + pr_warning "Unable to CONNECT via proxy. " + [[ "$PORT" != 443 ]] && prln_warning "Check whether your proxy supports port $PORT and the underlying protocol." + close_socket + return 6 + fi + fi + if [[ "$proyxline" == $'\r' ]] || [[ -z "$proyxline" ]] ; then + break + fi + done + # For the following execs: 2>/dev/null would remove a potential error message, but disables debugging. + # First we check whether a socket connect timeout was specified + elif [[ -n "$CONNECT_TIMEOUT" ]]; then + if ! $TIMEOUT_CMD $CONNECT_TIMEOUT bash -c "exec 5<>/dev/tcp/$nodeip/$PORT"; then + ((NR_SOCKET_FAIL++)) + connectivity_problem $NR_SOCKET_FAIL $MAX_SOCKET_FAIL "TCP connect problem" "repeated TCP connect problems (connect timeout), giving up" + outln + pr_warning "Unable to open a socket to $NODEIP:$PORT. " + return 6 + fi + # Now comes the the usual case + elif ! exec 5<>/dev/tcp/$nodeip/$PORT; then + ((NR_SOCKET_FAIL++)) + connectivity_problem $NR_SOCKET_FAIL $MAX_SOCKET_FAIL "TCP connect problem" "repeated TCP connect problems, giving up" + outln + pr_warning "Unable to open a socket to $NODEIP:$PORT. 
" + return 6 + fi + + if [[ -n "$STARTTLS" ]]; then + case "$STARTTLS_PROTOCOL" in # port + ftp|ftps) # https://tools.ietf.org/html/rfc4217, https://tools.ietf.org/html/rfc959 + starttls_ftp_dialog + ;; + smtp|smtps) # SMTP, see https://tools.ietf.org/html/rfc{2033,3207,5321} + starttls_smtp_dialog + ;; + lmtp|lmtps) # LMTP, see https://tools.ietf.org/html/rfc{2033,3207,5321} + starttls_smtp_dialog lmtp + ;; + pop3|pop3s) # POP, see https://tools.ietf.org/html/rfc2595 + starttls_pop3_dialog + ;; + nntp|nntps) # NNTP, see https://tools.ietf.org/html/rfc4642 + starttls_nntp_dialog + ;; + imap|imaps) # IMAP, https://tools.ietf.org/html/rfc2595, https://tools.ietf.org/html/rfc3501 + starttls_imap_dialog + ;; + irc|ircs) # IRC, https://ircv3.net/specs/extensions/tls-3.1.html, https://ircv3.net/specs/core/capability-negotiation.html + fatal "FIXME: IRC+STARTTLS not yet supported" $ERR_NOSUPPORT + ;; + ldap|ldaps) # LDAP, https://tools.ietf.org/html/rfc2830, https://tools.ietf.org/html/rfc4511 + fatal "FIXME: LDAP+STARTTLS over sockets not supported yet (try \"--ssl-native\")" $ERR_NOSUPPORT + ;; + acap|acaps) # ACAP = Application Configuration Access Protocol, see https://tools.ietf.org/html/rfc2595 + fatal "ACAP Easteregg: not implemented -- probably never will" $ERR_NOSUPPORT + ;; + xmpp|xmpps) # XMPP, see https://tools.ietf.org/html/rfc6120 + starttls_xmpp_dialog + # IM observatory: https://xmpp.net , XMPP server directory: https://xmpp.net/directory.php + ;; + postgres) # Postgres SQL, see https://www.postgresql.org/docs/devel/static/protocol-message-formats.html + starttls_postgres_dialog + ;; + mysql) # MySQL, see https://dev.mysql.com/doc/internals/en/x-protocol-lifecycle-lifecycle.html#x-protocol-lifecycle-tls-extension + starttls_mysql_dialog + ;; + *) # we need to throw an error here -- otherwise testssl.sh treats the STARTTLS protocol as plain SSL/TLS which leads to FP + fatal "FIXME: STARTTLS protocol $STARTTLS_PROTOCOL is not yet supported" $ERR_NOSUPPORT + esac + fi + [[ $? -eq 0 ]] && return 0 + prln_warning " STARTTLS handshake failed" + return 1 +} + +close_socket(){ + exec 5<&- + exec 5>&- + return 0 +} + +send_close_notify() { + local detected_tlsversion="$1" + + debugme echo "sending close_notify..." + if [[ $detected_tlsversion == 0300 ]]; then + socksend ",x15, x03, x00, x00, x02, x02, x00" 0 + else + socksend ",x15, x03, x01, x00, x02, x02, x00" 0 + fi +} + +# Format string properly for socket +# ARG1: any commented sequence of two bytes hex, separated by commas. 
It can contain comments, new lines, tabs and white spaces +# NW_STR holds the global with the string prepared for printf, like '\x16\x03\x03\' +code2network() { + NW_STR=$(sed -e 's/,/\\\x/g' <<< "$1" | sed -e 's/# .*$//g' -e 's/ //g' -e '/^$/d' | tr -d '\n' | tr -d '\t') +} + +# sockets inspired by https://blog.chris007.de/using-bash-for-network-socket-operation/ +# ARG1: hexbytes separated by commas, with a leading comma +# ARG2: seconds to sleep +socksend_clienthello() { + local data="" + + code2network "$1" + data="$NW_STR" + [[ "$DEBUG" -ge 4 ]] && echo && echo "\"$data\"" + if [[ -z "$PRINTF" ]] ;then + # We could also use "dd ibs=1M obs=1M" here but is seems to be at max 3% slower + printf -- "$data" | cat >&5 2>/dev/null & + else + $PRINTF -- "$data" 2>/dev/null >&5 2>/dev/null & + fi + sleep $USLEEP_SND +} + + +# ARG1: hexbytes -- preceeded by x -- separated by commas, with a leading comma +# ARG2: seconds to sleep +socksend() { + local data line + + # read line per line and strip comments (bash internal func can't handle multiline statements + data="$(while read line; do + printf "${line%%\#*}" + done <<< "$1" )" + data="${data// /}" # strip ' ' + data="${data//,/\\}" # s&r , by \ + [[ $DEBUG -ge 4 ]] && echo && echo "\"$data\"" + if [[ -z "$PRINTF" ]] ;then + printf -- "$data" | cat >&5 2>/dev/null & + else + $PRINTF -- "$data" 2>/dev/null >&5 2>/dev/null & + fi + sleep $2 +} + + +# for SSLv2 to TLS 1.2: +# ARG1: blocksize for reading +sockread_serverhello() { + [[ -z "$2" ]] && maxsleep=$MAX_WAITSOCK || maxsleep=$2 + SOCK_REPLY_FILE=$(mktemp $TEMPDIR/ddreply.XXXXXX) || return 7 + dd bs=$1 of=$SOCK_REPLY_FILE count=1 <&5 2>/dev/null & + wait_kill $! $maxsleep + return $? +} + +#trying a faster version +# ARG1: blocksize for reading +sockread_fast() { + dd bs=$1 count=1 <&5 2>/dev/null | hexdump -v -e '16/1 "%02X"' +} + +len2twobytes() { + local len_arg1=${#1} + [[ $len_arg1 -le 2 ]] && LEN_STR=$(printf "00, %02s \n" "$1") + [[ $len_arg1 -eq 3 ]] && LEN_STR=$(printf "0%s, %02s \n" "${1:0:1}" "${1:1:2}") + [[ $len_arg1 -eq 4 ]] && LEN_STR=$(printf "%02s, %02s \n" "${1:0:2}" "${1:2:2}") +} + + +get_pub_key_size() { + local pubkey pubkeybits + local -i i len1 len + + "$HAS_PKEY" || return 1 + + # OpenSSL displays the number of bits for RSA and ECC + pubkeybits=$($OPENSSL x509 -noout -pubkey -in $HOSTCERT 2>>$ERRFILE | $OPENSSL pkey -pubin -text 2>>$ERRFILE | awk -F'(' '/Public-Key/ { print $2 }') + if [[ -n $pubkeybits ]]; then + # remainder e.g. 
"256 bit)" + pubkeybits="${pubkeybits//\)/}" + echo "Server public key is $pubkeybits" >> $TMPFILE + else + # This extracts the public key for DSA, DH, and GOST + pubkey=$($OPENSSL x509 -noout -pubkey -in $HOSTCERT 2>>$ERRFILE | $OPENSSL pkey -pubin -outform DER 2>>$ERRFILE | hexdump -v -e '16/1 "%02X"') + [[ -z "$pubkey" ]] && return 1 + # Skip over tag and length of subjectPublicKeyInfo + i=2 + len1="0x${pubkey:i:2}" + if [[ $len1 -lt 0x80 ]]; then + i=$i+2 + else + len1=$len1-0x80 + i=$i+2*$len1+2 + fi + + # Skip over algorithm field + i=$i+2 + len1="0x${pubkey:i:2}" + i=$i+2 + if [[ $len1 -lt 0x80 ]]; then + i=$i+2*$len1 + else + case $len1 in + 129) len="0x${pubkey:i:2}" ;; + 130) len="0x${pubkey:i:2}" + i=$i+2 + len=256*$len+"0x${pubkey:i:2}" + ;; + 131) len="0x${pubkey:i:2}" + i=$i+2 + len=256*$len+"0x${pubkey:i:2}" + i=$i+2 + len=256*$len+"0x${pubkey:i:2}" + ;; + 132) len="0x${pubkey:i:2}" + i=$i+2 + len=256*$len+"0x${pubkey:i:2}" + i=$i+2 + len=256*$len+"0x${pubkey:i:2}" + i=$i+2 + len=256*$len+"0x${pubkey:i:2}" + ;; + esac + i=$i+2+2*$len + fi + + # Next is the public key BIT STRING. Skip over tag, length, and number of unused bits. + i=$i+2 + len1="0x${pubkey:i:2}" + if [[ $len1 -lt 0x80 ]]; then + i=$i+4 + else + len1=$len1-0x80 + i=$i+2*$len1+4 + fi + + # Now get the length of the public key + i=$i+2 + len1="0x${pubkey:i:2}" + i=$i+2 + if [[ $len1 -lt 0x80 ]]; then + len=$len1 + else + case $len1 in + 129) len="0x${pubkey:i:2}" ;; + 130) len="0x${pubkey:i:2}" + i=$i+2 + len=256*$len+"0x${pubkey:i:2}" + ;; + 131) len="0x${pubkey:i:2}" + i=$i+2 + len=256*$len+"0x${pubkey:i:2}" + i=$i+2 + len=256*$len+"0x${pubkey:i:2}" + ;; + 132) len="0x${pubkey:i:2}" + i=$i+2 + len=256*"0x${pubkey:i:2}" + i=$i+2 + len=256*"0x${pubkey:i:2}" + i=$i+2 + len=256*"0x${pubkey:i:2}" + ;; + esac + fi + len=8*$len # convert from bytes to bits + pubkeybits="$(printf "%d" $len)" + echo "Server public key is $pubkeybits bit" >> $TMPFILE + fi + return 0 +} + +# Extract the DH ephemeral key from the ServerKeyExchange message +get_dh_ephemeralkey() { + local tls_serverkeyexchange_ascii="$1" + local -i tls_serverkeyexchange_ascii_len offset + local dh_p dh_g dh_y dh_param len1 key_bitstring + local -i i dh_p_len dh_g_len dh_y_len dh_param_len + + "$HAS_PKEY" || return 1 + + tls_serverkeyexchange_ascii_len=${#tls_serverkeyexchange_ascii} + dh_p_len=2*$(hex2dec "${tls_serverkeyexchange_ascii:0:4}") + offset=4+$dh_p_len + if [[ $tls_serverkeyexchange_ascii_len -lt $offset ]]; then + debugme echo "Malformed ServerKeyExchange Handshake message in ServerHello." + return 1 + fi + + # Subtract any leading 0 bytes + for (( i=4; i < offset; i=i+2 )); do + [[ "${tls_serverkeyexchange_ascii:i:2}" != "00" ]] && break + dh_p_len=$dh_p_len-2 + done + if [[ $i -ge $offset ]]; then + debugme echo "Malformed ServerKeyExchange Handshake message in ServerHello." + return 1 + fi + dh_p="${tls_serverkeyexchange_ascii:i:dh_p_len}" + + dh_g_len=2*$(hex2dec "${tls_serverkeyexchange_ascii:offset:4}") + i=4+$offset + offset+=4+$dh_g_len + if [[ $tls_serverkeyexchange_ascii_len -lt $offset ]]; then + debugme echo "Malformed ServerKeyExchange Handshake message in ServerHello." + return 1 + fi + # Subtract any leading 0 bytes + for (( 1; i < offset; i=i+2 )); do + [[ "${tls_serverkeyexchange_ascii:i:2}" != "00" ]] && break + dh_g_len=$dh_g_len-2 + done + if [[ $i -ge $offset ]]; then + debugme echo "Malformed ServerKeyExchange Handshake message in ServerHello." 
+ return 1 + fi + dh_g="${tls_serverkeyexchange_ascii:i:dh_g_len}" + + dh_y_len=2*$(hex2dec "${tls_serverkeyexchange_ascii:offset:4}") + i=4+$offset + offset+=4+$dh_y_len + if [[ $tls_serverkeyexchange_ascii_len -lt $offset ]]; then + debugme echo "Malformed ServerKeyExchange Handshake message in ServerHello." + return 1 + fi + # Subtract any leading 0 bytes + for (( 1; i < offset; i=i+2 )); do + [[ "${tls_serverkeyexchange_ascii:i:2}" != "00" ]] && break + dh_y_len=$dh_y_len-2 + done + if [[ $i -ge $offset ]]; then + debugme echo "Malformed ServerKeyExchange Handshake message in ServerHello." + return 1 + fi + dh_y="${tls_serverkeyexchange_ascii:i:dh_y_len}" + + # The following code assumes that all lengths can be encoded using at most 2 bytes, + # which just means that the encoded length of the public key must be less than + # 65,536 bytes. If the length is anywhere close to that, it is almost certainly an + # encoding error. + if [[ $dh_p_len+$dh_g_len+$dh_y_len -ge 131000 ]]; then + debugme echo "Malformed ServerKeyExchange Handshake message in ServerHello." + return 1 + fi + # make ASN.1 INTEGER of p, g, and Y + [[ "0x${dh_p:0:1}" -ge 8 ]] && dh_p_len+=2 && dh_p="00$dh_p" + if [[ $dh_p_len -lt 256 ]]; then + len1="$(printf "%02x" $((dh_p_len/2)))" + elif [[ $dh_p_len -lt 512 ]]; then + len1="81$(printf "%02x" $((dh_p_len/2)))" + else + len1="82$(printf "%04x" $((dh_p_len/2)))" + fi + dh_p="02${len1}$dh_p" + + [[ "0x${dh_g:0:1}" -ge 8 ]] && dh_g_len+=2 && dh_g="00$dh_g" + if [[ $dh_g_len -lt 256 ]]; then + len1="$(printf "%02x" $((dh_g_len/2)))" + elif [[ $dh_g_len -lt 512 ]]; then + len1="81$(printf "%02x" $((dh_g_len/2)))" + else + len1="82$(printf "%04x" $((dh_g_len/2)))" + fi + dh_g="02${len1}$dh_g" + + [[ "0x${dh_y:0:1}" -ge 8 ]] && dh_y_len+=2 && dh_y="00$dh_y" + if [[ $dh_y_len -lt 256 ]]; then + len1="$(printf "%02x" $((dh_y_len/2)))" + elif [[ $dh_y_len -lt 512 ]]; then + len1="81$(printf "%02x" $((dh_y_len/2)))" + else + len1="82$(printf "%04x" $((dh_y_len/2)))" + fi + dh_y="02${len1}$dh_y" + + # Make a SEQUENCE of p and g + dh_param_len=${#dh_p}+${#dh_g} + if [[ $dh_param_len -lt 256 ]]; then + len1="$(printf "%02x" $((dh_param_len/2)))" + elif [[ $dh_param_len -lt 512 ]]; then + len1="81$(printf "%02x" $((dh_param_len/2)))" + else + len1="82$(printf "%04x" $((dh_param_len/2)))" + fi + dh_param="30${len1}${dh_p}${dh_g}" + + # Make a SEQUENCE of the parameters SEQUENCE and the OID + dh_param_len=22+${#dh_param} + if [[ $dh_param_len -lt 256 ]]; then + len1="$(printf "%02x" $((dh_param_len/2)))" + elif [[ $dh_param_len -lt 512 ]]; then + len1="81$(printf "%02x" $((dh_param_len/2)))" + else + len1="82$(printf "%04x" $((dh_param_len/2)))" + fi + dh_param="30${len1}06092A864886F70D010301${dh_param}" + + # Encapsulate public key, y, in a BIT STRING + dh_y_len=${#dh_y}+2 + if [[ $dh_y_len -lt 256 ]]; then + len1="$(printf "%02x" $((dh_y_len/2)))" + elif [[ $dh_y_len -lt 512 ]]; then + len1="81$(printf "%02x" $((dh_y_len/2)))" + else + len1="82$(printf "%04x" $((dh_y_len/2)))" + fi + dh_y="03${len1}00$dh_y" + + # Create the public key SEQUENCE + i=${#dh_param}+${#dh_y} + if [[ $i -lt 256 ]]; then + len1="$(printf "%02x" $((i/2)))" + elif [[ $i -lt 512 ]]; then + len1="81$(printf "%02x" $((i/2)))" + else + len1="82$(printf "%04x" $((i/2)))" + fi + key_bitstring="30${len1}${dh_param}${dh_y}" + key_bitstring="$(asciihex_to_binary "$key_bitstring" | $OPENSSL pkey -pubin -inform DER 2> $ERRFILE)" + [[ -z "$key_bitstring" ]] && return 1 + tm_out "$key_bitstring" + return 0 +} + +# arg1: 
name of file with socket reply +# arg2: true if entire server hello should be parsed +# return values: 0=no SSLv2 (reset) +# 1=no SSLv2 (plaintext reply like it happens with OLS webservers) +# 3=SSLv2 supported (in $TEMPDIR/$NODEIP.sslv2_sockets.dd is reply for further processing +# --> there could be checked whether ciphers e.g have been returned at all (or anything else) +# 4=looks like an STARTTLS 5xx message +# 6=socket couldn't be opened +# 7=strange reply we can't deal with +parse_sslv2_serverhello() { + local ret v2_hello_ascii v2_hello_initbyte v2_hello_length + local v2_hello_handshake v2_cert_type v2_hello_cert_length + local v2_hello_cipherspec_length + local -i certificate_len nr_ciphers_detected offset i + local ret=3 + local parse_complete="false" + # SSLv2 server hello: in hex representation, see below + # byte 1+2: length of server hello 0123 + # 3: 04=Handshake message, server hello 45 + # 4: session id hit or not (boolean: 00=false, this 67 + # is the normal case) + # 5: certificate type, 01 = x509 89 + # 6+7 version (00 02 = SSLv2) 10-13 + # 8+9 certificate length 14-17 + # 10+11 cipher spec length 17-20 + # 12+13 connection id length + # [certificate length] ==> certificate + # [cipher spec length] ==> ciphers GOOD: HERE ARE ALL CIPHERS ALREADY! + + # Note: recent SSL/TLS stacks reply with a TLS alert on a SSLv2 client hello. + # The TLS error message is different and could be used for fingerprinting. + + if [[ "$2" == "true" ]]; then + parse_complete=true + fi + "$parse_complete" && echo "======================================" > $TMPFILE + + v2_hello_ascii=$(hexdump -v -e '16/1 "%02X"' $1) + v2_hello_ascii="${v2_hello_ascii%%[!0-9A-F]*}" + [[ "$DEBUG" -ge 5 ]] && echo "$v2_hello_ascii" + if [[ -z "$v2_hello_ascii" ]]; then + ret=0 # 1 line without any blanks: no server hello received + debugme echo "server hello empty" + else + # now scrape two bytes out of the reply per byte + v2_hello_initbyte="${v2_hello_ascii:0:1}" # normally this belongs to the next, should be 8! + v2_hello_length="${v2_hello_ascii:1:3}" # + 0x8000 see above + v2_hello_handshake="${v2_hello_ascii:4:2}" + v2_cert_type="${v2_hello_ascii:8:2}" + v2_hello_cert_length="${v2_hello_ascii:14:4}" + v2_hello_cipherspec_length="${v2_hello_ascii:18:4}" + + V2_HELLO_CIPHERSPEC_LENGTH=$(printf "%d\n" "0x$v2_hello_cipherspec_length" 2>/dev/null) + [[ $? -ne 0 ]] && ret=7 + + if [[ "${v2_hello_ascii:0:2}" == "35" ]] && "$do_starttls"; then + # this could be a 500/5xx for some weird reason where the STARTTLS handshake failed + debugme echo "$(hex2ascii "$v2_hello_ascii")" + ret=4 + elif [[ "${v2_hello_ascii:0:4}" == "1503" ]]; then + # Cloudflare does this, OpenSSL 1.1.1 and picoTLS. 
With different alert messages + # Just in case somebody's interested in the exact error, we deliver it ;-) + debugme echo -n ">TLS< alert message discovered: ${v2_hello_ascii} " + case "${v2_hello_ascii:10:2}" in + 01) debugme echo "(01/warning: 0x"${v2_hello_ascii:12:2}"/$(tls_alert "${v2_hello_ascii:12:2}"))" ;; + 02) debugme echo "(02/fatal: 0x"${v2_hello_ascii:12:2}"/$(tls_alert "${v2_hello_ascii:12:2}"))" ;; + *) debugme echo "("${v2_hello_ascii:10:2}" : "${v2_hello_ascii:12:2}"))" ;; + esac + ret=0 + elif [[ $v2_hello_initbyte != "8" ]] || [[ $v2_hello_handshake != "04" ]]; then + ret=1 + if [[ $DEBUG -ge 2 ]]; then + echo "no correct server hello" + echo "SSLv2 server init byte: 0x0$v2_hello_initbyte" + echo "SSLv2 hello handshake : 0x$v2_hello_handshake" + fi + fi + + if [[ $DEBUG -ge 3 ]]; then + echo "SSLv2 server hello length: 0x0$v2_hello_length" + echo "SSLv2 certificate type: 0x$v2_cert_type" + echo "SSLv2 certificate length: 0x$v2_hello_cert_length" + echo "SSLv2 cipher spec length: 0x$v2_hello_cipherspec_length" + fi + + if "$parse_complete" && [[ 2*$(hex2dec "$v2_hello_length") -ne ${#v2_hello_ascii}-4 ]]; then + ret=7 + fi + fi + + "$parse_complete" || return $ret + + # not sure why we need this + rm -f $HOSTCERT + > $TEMPDIR/intermediatecerts.pem + if [[ $ret -eq 3 ]]; then + certificate_len=2*$(hex2dec "$v2_hello_cert_length") + + if [[ "$v2_cert_type" == "01" ]] && [[ "$v2_hello_cert_length" != "00" ]]; then + asciihex_to_binary "${v2_hello_ascii:26:certificate_len}" | \ + $OPENSSL x509 -inform DER -outform PEM -out $HOSTCERT 2>$ERRFILE + if [[ $? -ne 0 ]]; then + debugme echo "Malformed certificate in ServerHello." + return 1 + fi + get_pub_key_size + echo "======================================" >> $TMPFILE + fi + + # Output list of supported ciphers + offset=$((certificate_len+26)) + nr_ciphers_detected=$((V2_HELLO_CIPHERSPEC_LENGTH / 3)) + for (( i=0 ; i> $TMPFILE + offset=$((offset+6)) + done + echo "======================================" >> $TMPFILE + + tmpfile_handle ${FUNCNAME[0]}.txt + fi + return $ret +} + +# arg1: hash function +# arg2: key +# arg3: text +hmac() { + local hash_fn="$1" + local key="$2" text="$3" output + local -i ret + + if [[ ! "$OSSL_NAME" =~ LibreSSL ]] && [[ $OSSL_VER_MAJOR.$OSSL_VER_MINOR == 3.0.0* ]]; then + output="$(asciihex_to_binary "$text" | $OPENSSL mac -macopt digest:"${hash_fn/-/}" -macopt hexkey:"$key" HMAC 2>/dev/null)" + ret=$? + tm_out "$(strip_lf "$output")" + else + output="$(asciihex_to_binary "$text" | $OPENSSL dgst "$hash_fn" -mac HMAC -macopt hexkey:"$key" 2>/dev/null)" + ret=$? + tm_out "$(awk '/=/ { print $2 }' <<< "$output")" + fi + return $ret +} + +# arg1: hash function +# arg2: pseudorandom key (PRK) +# arg2: info +# arg3: length of output keying material in octets +# See RFC 5869, Section 2.3 +hkdf-expand() { + local hash_fn="$1" + local prk="$2" info="$3" output="" + local -i out_len="$4" + local -i i n mod_check hash_len ret + local counter + local ti tim1 # T(i) and T(i-1) + + case "$hash_fn" in + "-sha256") hash_len=32 ;; + "-sha384") hash_len=48 ;; + *) return 7 + esac + + n=$out_len/$hash_len + mod_check=$out_len%$hash_len + [[ $mod_check -ne 0 ]] && n+=1 + + tim1="" + for (( i=1; i <= n; i++ )); do + counter="$(printf "%02X\n" $i)" + ti="$(hmac "$hash_fn" "$prk" "$tim1$info$counter")" + [[ $? 
-ne 0 ]] && return 7 + output+="$ti" + tim1="$ti" + done + out_len=2*$out_len + tm_out "${output:0:out_len}" + return 0 +} + +# arg1: hash function +# arg2: secret +# arg3: label +# arg4: context +# arg5: length +# See RFC 8446, Section 7.1 +hkdf-expand-label() { + local hash_fn="$1" + local secret="$2" label="$3" + local context="$4" + local -i length="$5" + local hkdflabel hkdflabel_label hkdflabel_context + local hkdflabel_length + local -i len + + hkdflabel_length="$(printf "%04X\n" $length)" + if [[ "${TLS_SERVER_HELLO:8:2}" == "7F" ]] && [[ 0x${TLS_SERVER_HELLO:10:2} -lt 0x14 ]]; then + # "544c5320312e332c20" = "TLS 1.3, " + hkdflabel_label="544c5320312e332c20$label" + else + # "746c73313320" = "tls13 " + hkdflabel_label="746c73313320$label" + fi + len=${#hkdflabel_label}/2 + hkdflabel_label="$(printf "%02X\n" $len)$hkdflabel_label" + len=${#context}/2 + hkdflabel_context="$(printf "%02X\n" $len)$context" + hkdflabel="$hkdflabel_length$hkdflabel_label$hkdflabel_context" + + hkdf-expand "$hash_fn" "$secret" "$hkdflabel" "$length" + return $? +} + +# arg1: hash function +# arg2: secret +# arg3: label +# arg4: ASCII-HEX of messages +# See RFC 8446, Section 7.1 +derive-secret() { + local hash_fn="$1" + local secret="$2" label="$3" messages="$4" + local hash_messages + local -i hash_len retcode + + case "$hash_fn" in + "-sha256") hash_len=32 ;; + "-sha384") hash_len=48 ;; + *) return 7 + esac + + hash_messages="$(asciihex_to_binary "$messages" | $OPENSSL dgst "$hash_fn" 2>/dev/null | awk '/=/ { print $2 }')" + hkdf-expand-label "$hash_fn" "$secret" "$label" "$hash_messages" "$hash_len" + return $? +} + +# arg1: hash function +# arg2: private key file +# arg3: file containing server's ephemeral public key +# arg4: ASCII-HEX of messages (ClientHello...ServerHello) +# See key derivation schedule diagram in Section 7.1 of RFC 8446 +derive-handshake-traffic-secret() { + local hash_fn="$1" + local priv_file="$2" pub_file="$3" + local messages="$4" + local -i i ret + local early_secret derived_secret shared_secret handshake_secret + + "$HAS_PKUTIL" || return 1 + + # early_secret="$(hmac "$hash_fn" "000...000" "000...000")" + case "$hash_fn" in + "-sha256") early_secret="33ad0a1c607ec03b09e6cd9893680ce210adf300aa1f2660e1b22e10f170f92a" + if [[ "${TLS_SERVER_HELLO:8:2}" == "7F" ]] && [[ 0x${TLS_SERVER_HELLO:10:2} -lt 0x14 ]]; then + # "6465726976656420736563726574" = "derived secret" + # derived_secret="$(derive-secret "$hash_fn" "$early_secret" "6465726976656420736563726574" "")" + derived_secret="c1c0c36bf8fb1d1afa949fbd360e71af69a6244a4c2eaef5bbbb6442a7277d2c" + else + # "64657269766564" = "derived" + # derived_secret="$(derive-secret "$hash_fn" "$early_secret" "64657269766564" "")" + derived_secret="6f2615a108c702c5678f54fc9dbab69716c076189c48250cebeac3576c3611ba" + fi + ;; + "-sha384") early_secret="7ee8206f5570023e6dc7519eb1073bc4e791ad37b5c382aa10ba18e2357e716971f9362f2c2fe2a76bfd78dfec4ea9b5" + if [[ "${TLS_SERVER_HELLO:8:2}" == "7F" ]] && [[ 0x${TLS_SERVER_HELLO:10:2} -lt 0x14 ]]; then + # "6465726976656420736563726574" = "derived secret" + # derived_secret="$(derive-secret "$hash_fn" "$early_secret" "6465726976656420736563726574" "")" + derived_secret="54c80fa05ee9e0532ce3db8ddeca37a0365683bcd3b27bdc88d2b9fdc115ca4ebc8edc1f0b72a6a0861e803fc34761ef" + else + # "64657269766564" = "derived" + # derived_secret="$(derive-secret "$hash_fn" "$early_secret" "64657269766564" "")" + derived_secret="1591dac5cbbf0330a4a84de9c753330e92d01f0a88214b4464972fd668049e93e52f2b16fad922fdc0584478428f282b" + 
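+                  # The hex constants above are the precomputed outputs of the commented-out
+                  # hmac()/derive-secret() calls (HKDF-Extract of all-zero inputs and
+                  # Derive-Secret(., "derived"/"derived secret", "")), presumably hard-coded
+                  # so they do not have to be recomputed for every handshake.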
fi + ;; + *) return 7 + esac + + shared_secret="$($OPENSSL pkeyutl -derive -inkey "$priv_file" -peerkey "$pub_file" 2>/dev/null | hexdump -v -e '16/1 "%02X"')" + + # For draft 18 use $early_secret rather than $derived_secret. + if [[ "${TLS_SERVER_HELLO:8:4}" == "7F12" ]]; then + handshake_secret="$(hmac "$hash_fn" "$early_secret" "${shared_secret%%[!0-9A-F]*}")" + else + handshake_secret="$(hmac "$hash_fn" "$derived_secret" "${shared_secret%%[!0-9A-F]*}")" + fi + [[ $? -ne 0 ]] && return 7 + + if [[ "${TLS_SERVER_HELLO:8:2}" == "7F" ]] && [[ 0x${TLS_SERVER_HELLO:10:2} -lt 0x14 ]]; then + # "7365727665722068616e647368616b65207472616666696320736563726574" = "server handshake traffic secret" + derived_secret="$(derive-secret "$hash_fn" "$handshake_secret" "7365727665722068616e647368616b65207472616666696320736563726574" "$messages")" + else + # "732068732074726166666963" = "s hs traffic" + derived_secret="$(derive-secret "$hash_fn" "$handshake_secret" "732068732074726166666963" "$messages")" + fi + [[ $? -ne 0 ]] && return 7 + tm_out "$derived_secret" + return 0 +} + +# arg1: hash function +# arg2: secret (created by derive-handshake-traffic-secret) +# arg3: purpose ("key" or "iv") +# arg4: length of the key +# See RFC 8446, Section 7.3 +derive-traffic-key() { + local hash_fn="$1" + local secret="$2" purpose="$3" + local -i key_length="$4" + local key + + key="$(hkdf-expand-label "$hash_fn" "$secret" "$purpose" "" "$key_length")" + [[ $? -ne 0 ]] && return 7 + tm_out "$key" + return 0 +} + +#arg1: TLS cipher +#arg2: file containing cipher name, public key, and private key +#arg3: First ClientHello, if response was a HelloRetryRequest +#arg4: HelloRetryRequest, if one was sent +#arg5: Final (or only) ClientHello +#arg6: ServerHello +derive-handshake-traffic-keys() { + local cipher="$1" + local tmpfile="$2" + local clienthello1="$3" hrr="$4" clienthello2="$5" serverhello="$6" + local hash_clienthello1 + local -i key_len + local -i retcode + local hash_fn + local pub_file priv_file tmpfile + local derived_secret server_write_key server_write_iv + + if [[ "$cipher" == *SHA256 ]]; then + hash_fn="-sha256" + elif [[ "$cipher" == *SHA384 ]]; then + hash_fn="-sha384" + else + return 1 + fi + if [[ "$cipher" == *AES_128* ]]; then + key_len=16 + elif ( [[ "$cipher" == *AES_256* ]] || [[ "$cipher" == *CHACHA20_POLY1305* ]] ); then + key_len=32 + else + return 1 + fi + pub_file="$(mktemp "$TEMPDIR/pubkey.XXXXXX")" || return 7 + awk '/-----BEGIN PUBLIC KEY/,/-----END PUBLIC KEY/ { print $0 }' \ + "$tmpfile" > "$pub_file" + [[ ! -s "$pub_file" ]] && return 1 + + priv_file="$(mktemp "$TEMPDIR/privkey.XXXXXX")" || return 7 + if grep -q "\-\-\-\-\-BEGIN EC PARAMETERS" "$tmpfile"; then + awk '/-----BEGIN EC PARAMETERS/,/-----END EC PRIVATE KEY/ { print $0 }' \ + "$tmpfile" > "$priv_file" + else + awk '/-----BEGIN PRIVATE KEY/,/-----END PRIVATE KEY/ { print $0 }' \ + "$tmpfile" > "$priv_file" + fi + [[ ! 
-s "$priv_file" ]] && return 1 + + if [[ -n "$hrr" ]] && [[ "${serverhello:8:4}" == "7F12" ]]; then + derived_secret="$(derive-handshake-traffic-secret "$hash_fn" "$priv_file" "$pub_file" "$clienthello1$hrr$clienthello2$serverhello")" + elif [[ -n "$hrr" ]]; then + hash_clienthello1="$(asciihex_to_binary "$clienthello1" | $OPENSSL dgst "$hash_fn" 2>/dev/null | awk '/=/ { print $2 }')" + derived_secret="$(derive-handshake-traffic-secret "$hash_fn" "$priv_file" "$pub_file" "FE0000$(printf "%02x" $((${#hash_clienthello1}/2)))$hash_clienthello1$hrr$clienthello2$serverhello")" + else + derived_secret="$(derive-handshake-traffic-secret "$hash_fn" "$priv_file" "$pub_file" "$clienthello2$serverhello")" + fi + retcode=$? + rm $pub_file $priv_file + [[ $retcode -ne 0 ]] && return 1 + # "6b6579" = "key" + server_write_key="$(derive-traffic-key "$hash_fn" "$derived_secret" "6b6579" "$key_len")" + [[ $? -ne 0 ]] && return 1 + # "6976" = "iv" + server_write_iv="$(derive-traffic-key "$hash_fn" "$derived_secret" "6976" "12")" + [[ $? -ne 0 ]] && return 1 + tm_out "$server_write_key $server_write_iv" + return 0 +} + +generate-ccm-gcm-keystream() { + local icb="$1" icb_msb icb_lsb1 + local -i i icb_lsb n="$2" + + icb_msb="${icb:0:24}" + icb_lsb=0x${icb:24:8} + + for (( i=0; i < n; i=i+1 )); do + icb_lsb1="$(printf "%08X" $icb_lsb)" + printf "\x${icb_msb:0:2}\x${icb_msb:2:2}\x${icb_msb:4:2}\x${icb_msb:6:2}\x${icb_msb:8:2}\x${icb_msb:10:2}\x${icb_msb:12:2}\x${icb_msb:14:2}\x${icb_msb:16:2}\x${icb_msb:18:2}\x${icb_msb:20:2}\x${icb_msb:22:2}\x${icb_lsb1:0:2}\x${icb_lsb1:2:2}\x${icb_lsb1:4:2}\x${icb_lsb1:6:2}" + icb_lsb+=1 + done + return 0 +} + +# arg1: an OpenSSL ecb cipher (e.g., -aes-128-ecb) +# arg2: key +# arg3: initial counter value (must be 128 bits) +# arg4: ciphertext +# See Sections 6.5 and 7.2 of SP 800-38D and Section 6.2 and Appendix A of SP 800-38C +ccm-gcm-decrypt() { + local cipher="$1" + local key="$2" + local icb="$3" + local ciphertext="$4" + local -i i i1 i2 i3 i4 + local -i ciphertext_len n mod_check + local y plaintext="" + + [[ ${#icb} -ne 32 ]] && return 7 + + ciphertext_len=${#ciphertext} + n=$ciphertext_len/32 + mod_check=$ciphertext_len%32 + [[ $mod_check -ne 0 ]] && n+=1 + y="$(generate-ccm-gcm-keystream "$icb" "$n" | $OPENSSL enc "$cipher" -K "$key" -nopad 2>/dev/null | hexdump -v -e '16/1 "%02X"')" + + # XOR the ciphertext with the keystream ($y). For efficiency, work in blocks of 16 bytes at a time (but with each XOR operation working on + # 32 bits. + [[ $mod_check -ne 0 ]] && n=$n-1 + for (( i=0; i < n; i++ )); do + i1=32*$i; i2=$i1+8; i3=$i1+16; i4=$i1+24 + plaintext+="$(printf "%08X%08X%08X%08X" "$((0x${ciphertext:i1:8} ^ 0x${y:i1:8}))" "$((0x${ciphertext:i2:8} ^ 0x${y:i2:8}))" "$((0x${ciphertext:i3:8} ^ 0x${y:i3:8}))" "$((0x${ciphertext:i4:8} ^ 0x${y:i4:8}))")" + done + # If the length of the ciphertext is not an even multiple of 16 bytes, then handle the final incomplete block. 
+ if [[ $mod_check -ne 0 ]]; then + i1=32*$n + for (( i=0; i < mod_check; i=i+2 )); do + plaintext+="$(printf "%02X" "$((0x${ciphertext:i1:2} ^ 0x${y:i1:2}))")" + i1+=2 + done + fi + tm_out "$plaintext" + return 0 +} + +# See RFC 7539, Section 2.1 +chacha20_Qround() { + local -i a="0x$1" + local -i b="0x$2" + local -i c="0x$3" + local -i d="0x$4" + local -i x y + + a=$(((a+b) & 0xffffffff)) + d=$((d^a)) + # rotate d left 16 bits + x=$((d & 0xffff0000)) + x=$((x >> 16)) + y=$((d & 0x0000ffff)) + y=$((y << 16)) + d=$((x | y)) + + c=$(((c+d) & 0xffffffff)) + b=$((b^c)) + # rotate b left 12 bits + x=$((b & 0xfff00000)) + x=$((x >> 20)) + y=$((b & 0x000fffff)) + y=$((y << 12)) + b=$((x | y)) + + a=$(((a+b) & 0xffffffff)) + d=$((d^a)) + # rotate d left 8 bits + x=$((d & 0xff000000)) + x=$((x >> 24)) + y=$((d & 0x00ffffff)) + y=$((y << 8)) + d=$((x | y)) + + c=$(((c+d) & 0xffffffff)) + b=$((b^c)) + # rotate b left 7 bits + x=$((b & 0xfe000000)) + x=$((x >> 25)) + y=$((b & 0x01ffffff)) + y=$((y << 7)) + b=$((x | y)) + + tm_out "$(printf "%x" $a) $(printf "%x" $b) $(printf "%x" $c) $(printf "%x" $d)" + return 0 +} + +# See RFC 7539, Section 2.3.1 +chacha20_inner_block() { + local s0="$1" s1="$2" s2="$3" s3="$4" + local s4="$5" s5="$6" s6="$7" s7="$8" + local s8="$9" s9="${10}" s10="${11}" s11="${12}" + local s12="${13}" s13="${14}" s14="${15}" s15="${16}" + local res + + res="$(chacha20_Qround "$s0" "$s4" "$s8" "$s12")" + read -r s0 s4 s8 s12 <<< "$res" + res="$(chacha20_Qround "$s1" "$s5" "$s9" "$s13")" + read -r s1 s5 s9 s13 <<< "$res" + res="$(chacha20_Qround "$s2" "$s6" "$s10" "$s14")" + read -r s2 s6 s10 s14 <<< "$res" + res="$(chacha20_Qround "$s3" "$s7" "$s11" "$s15")" + read -r s3 s7 s11 s15 <<< "$res" + res="$(chacha20_Qround "$s0" "$s5" "$s10" "$s15")" + read -r s0 s5 s10 s15 <<< "$res" + res="$(chacha20_Qround "$s1" "$s6" "$s11" "$s12")" + read -r s1 s6 s11 s12 <<< "$res" + res="$(chacha20_Qround "$s2" "$s7" "$s8" "$s13")" + read -r s2 s7 s8 s13 <<< "$res" + res="$(chacha20_Qround "$s3" "$s4" "$s9" "$s14")" + read -r s3 s4 s9 s14 <<< "$res" + + tm_out "$s0 $s1 $s2 $s3 $s4 $s5 $s6 $s7 $s8 $s9 $s10 $s11 $s12 $s13 $s14 $s15" + return 0 +} + +# See RFC 7539, Sections 2.3 and 2.3.1 +chacha20_block() { + local key="$1" + local counter="$2" + local nonce="$3" + local s0 s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 s12 s13 s14 s15 + local ws0 ws1 ws2 ws3 ws4 ws5 ws6 ws7 ws8 ws9 ws10 ws11 ws12 ws13 ws14 ws15 + local working_state + local -i i + + # create the state variable + s0="61707865"; s1="3320646e"; s2="79622d32"; s3="6b206574" + s4="${key:6:2}${key:4:2}${key:2:2}${key:0:2}" + s5="${key:14:2}${key:12:2}${key:10:2}${key:8:2}" + s6="${key:22:2}${key:20:2}${key:18:2}${key:16:2}" + s7="${key:30:2}${key:28:2}${key:26:2}${key:24:2}" + s8="${key:38:2}${key:36:2}${key:34:2}${key:32:2}" + s9="${key:46:2}${key:44:2}${key:42:2}${key:40:2}" + s10="${key:54:2}${key:52:2}${key:50:2}${key:48:2}" + s11="${key:62:2}${key:60:2}${key:58:2}${key:56:2}" + s12="$counter" + s13="${nonce:6:2}${nonce:4:2}${nonce:2:2}${nonce:0:2}" + s14="${nonce:14:2}${nonce:12:2}${nonce:10:2}${nonce:8:2}" + s15="${nonce:22:2}${nonce:20:2}${nonce:18:2}${nonce:16:2}" + + # Initialize working_state to state + working_state="$s0 $s1 $s2 $s3 $s4 $s5 $s6 $s7 $s8 $s9 $s10 $s11 $s12 $s13 $s14 $s15" + + # compute the 20 rounds (10 calls to inner block function, each of which + # performs 8 quarter rounds). 
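+     # chacha20_Qround() above can be sanity-checked against the test vector from
+     # RFC 7539, Section 2.1.1 (shown for illustration only, not executed here):
+     #   chacha20_Qround 11111111 01020304 9b8d6f43 01234567
+     #   --> ea2a92f4 cb1cf8ce 4581472e 5881c4bb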
+ for (( i=0 ; i < 10; i++ )); do + working_state="$(chacha20_inner_block $working_state)" + done + read -r ws0 ws1 ws2 ws3 ws4 ws5 ws6 ws7 ws8 ws9 ws10 ws11 ws12 ws13 ws14 ws15 <<< "$working_state" + + # Add working state to state + s0="$(printf "%08X" $(((0x$s0+0x$ws0) & 0xffffffff)))" + s1="$(printf "%08X" $(((0x$s1+0x$ws1) & 0xffffffff)))" + s2="$(printf "%08X" $(((0x$s2+0x$ws2) & 0xffffffff)))" + s3="$(printf "%08X" $(((0x$s3+0x$ws3) & 0xffffffff)))" + s4="$(printf "%08X" $(((0x$s4+0x$ws4) & 0xffffffff)))" + s5="$(printf "%08X" $(((0x$s5+0x$ws5) & 0xffffffff)))" + s6="$(printf "%08X" $(((0x$s6+0x$ws6) & 0xffffffff)))" + s7="$(printf "%08X" $(((0x$s7+0x$ws7) & 0xffffffff)))" + s8="$(printf "%08X" $(((0x$s8+0x$ws8) & 0xffffffff)))" + s9="$(printf "%08X" $(((0x$s9+0x$ws9) & 0xffffffff)))" + s10="$(printf "%08X" $(((0x$s10+0x$ws10) & 0xffffffff)))" + s11="$(printf "%08X" $(((0x$s11+0x$ws11) & 0xffffffff)))" + s12="$(printf "%08X" $(((0x$s12+0x$ws12) & 0xffffffff)))" + s13="$(printf "%08X" $(((0x$s13+0x$ws13) & 0xffffffff)))" + s14="$(printf "%08X" $(((0x$s14+0x$ws14) & 0xffffffff)))" + s15="$(printf "%08X" $(((0x$s15+0x$ws15) & 0xffffffff)))" + + # serialize the state + s0="${s0:6:2}${s0:4:2}${s0:2:2}${s0:0:2}" + s1="${s1:6:2}${s1:4:2}${s1:2:2}${s1:0:2}" + s2="${s2:6:2}${s2:4:2}${s2:2:2}${s2:0:2}" + s3="${s3:6:2}${s3:4:2}${s3:2:2}${s3:0:2}" + s4="${s4:6:2}${s4:4:2}${s4:2:2}${s4:0:2}" + s5="${s5:6:2}${s5:4:2}${s5:2:2}${s5:0:2}" + s6="${s6:6:2}${s6:4:2}${s6:2:2}${s6:0:2}" + s7="${s7:6:2}${s7:4:2}${s7:2:2}${s7:0:2}" + s8="${s8:6:2}${s8:4:2}${s8:2:2}${s8:0:2}" + s9="${s9:6:2}${s9:4:2}${s9:2:2}${s9:0:2}" + s10="${s10:6:2}${s10:4:2}${s10:2:2}${s10:0:2}" + s11="${s11:6:2}${s11:4:2}${s11:2:2}${s11:0:2}" + s12="${s12:6:2}${s12:4:2}${s12:2:2}${s12:0:2}" + s13="${s13:6:2}${s13:4:2}${s13:2:2}${s13:0:2}" + s14="${s14:6:2}${s14:4:2}${s14:2:2}${s14:0:2}" + s15="${s15:6:2}${s15:4:2}${s15:2:2}${s15:0:2}" + + tm_out "$s0$s1$s2$s3$s4$s5$s6$s7$s8$s9$s10$s11$s12$s13$s14$s15" + return 0 +} + +# See RFC 7539, Section 2.4 +chacha20() { + local key="$1" + local -i counter=1 + local nonce="$2" + local ciphertext="$3" + local -i i ciphertext_len num_blocks mod_check + local -i i1 i2 i3 i4 i5 i6 i7 i8 i9 i10 i11 i12 i13 i14 i15 i16 + local keystream plaintext="" + + ciphertext_len=${#ciphertext} + num_blocks=$ciphertext_len/128 + + for (( i=0; i < num_blocks; i++)); do + i1=128*$i; i2=$i1+8; i3=$i1+16; i4=$i1+24; i5=$i1+32; i6=$i1+40; i7=$i1+48; i8=$i1+56 + i9=$i1+64; i10=$i1+72; i11=$i1+80; i12=$i1+88; i13=$i1+96; i14=$i1+104; i15=$i1+112; i16=$i1+120 + keystream="$(chacha20_block "$key" "$(printf "%08X" $counter)" "$nonce")" + plaintext+="$(printf "%08X%08X%08X%08X%08X%08X%08X%08X%08X%08X%08X%08X%08X%08X%08X%08X" \ + "$((0x${ciphertext:i1:8} ^ 0x${keystream:0:8}))" \ + "$((0x${ciphertext:i2:8} ^ 0x${keystream:8:8}))" \ + "$((0x${ciphertext:i3:8} ^ 0x${keystream:16:8}))" \ + "$((0x${ciphertext:i4:8} ^ 0x${keystream:24:8}))" \ + "$((0x${ciphertext:i5:8} ^ 0x${keystream:32:8}))" \ + "$((0x${ciphertext:i6:8} ^ 0x${keystream:40:8}))" \ + "$((0x${ciphertext:i7:8} ^ 0x${keystream:48:8}))" \ + "$((0x${ciphertext:i8:8} ^ 0x${keystream:56:8}))" \ + "$((0x${ciphertext:i9:8} ^ 0x${keystream:64:8}))" \ + "$((0x${ciphertext:i10:8} ^ 0x${keystream:72:8}))" \ + "$((0x${ciphertext:i11:8} ^ 0x${keystream:80:8}))" \ + "$((0x${ciphertext:i12:8} ^ 0x${keystream:88:8}))" \ + "$((0x${ciphertext:i13:8} ^ 0x${keystream:96:8}))" \ + "$((0x${ciphertext:i14:8} ^ 0x${keystream:104:8}))" \ + "$((0x${ciphertext:i15:8} ^ 0x${keystream:112:8}))" 
\ + "$((0x${ciphertext:i16:8} ^ 0x${keystream:120:8}))")" + counter+=1 + done + + mod_check=$ciphertext_len%128 + if [[ $mod_check -ne 0 ]]; then + keystream="$(chacha20_block "$key" "$(printf "%08X" $counter)" "$nonce")" + i1=128*$num_blocks + for (( i=0; i < mod_check; i=i+2 )); do + plaintext+="$(printf "%02X" "$((0x${ciphertext:i1:2} ^ 0x${keystream:i:2}))")" + i1+=2 + done + fi + tm_out "$plaintext" + return 0 +} + +# arg1: TLS cipher +# arg2: key +# arg3: nonce (must be 96 bits in length) +# arg4: ciphertext +sym-decrypt() { + local cipher="$1" + local key="$2" nonce="$3" + local ciphertext="$4" + local ossl_cipher + local plaintext + local -i ciphertext_len tag_len + + case "$cipher" in + *CCM_8*) + tag_len=16 ;; + *CCM*|*GCM*|*CHACHA20_POLY1305*) + tag_len=32 ;; + *) + return 7 ;; + esac + + # The final $tag_len characters of the ciphertext are the authentication tag + ciphertext_len=${#ciphertext} + [[ $ciphertext_len -lt $tag_len ]] && return 7 + ciphertext_len=$ciphertext_len-$tag_len + + if [[ "$cipher" =~ CHACHA20_POLY1305 ]]; then + if "$HAS_CHACHA20"; then + plaintext="$(asciihex_to_binary "${ciphertext:0:ciphertext_len}" | \ + $OPENSSL enc -chacha20 -K "$key" -iv "01000000$nonce" 2>/dev/null | hexdump -v -e '16/1 "%02X"')" + plaintext="$(strip_spaces "$plaintext")" + else + plaintext="$(chacha20 "$key" "$nonce" "${ciphertext:0:ciphertext_len}")" + fi + elif [[ "$cipher" == TLS_AES_128_GCM_SHA256 ]] && "$HAS_AES128_GCM"; then + plaintext="$(asciihex_to_binary "${ciphertext:0:ciphertext_len}" | \ + $OPENSSL enc -aes-128-gcm -K "$key" -iv "$nonce" 2>/dev/null | hexdump -v -e '16/1 "%02X"')" + plaintext="$(strip_spaces "$plaintext")" + elif [[ "$cipher" == TLS_AES_256_GCM_SHA384 ]] && "$HAS_AES256_GCM"; then + plaintext="$(asciihex_to_binary "${ciphertext:0:ciphertext_len}" | \ + $OPENSSL enc -aes-256-gcm -K "$key" -iv "$nonce" 2>/dev/null | hexdump -v -e '16/1 "%02X"')" + plaintext="$(strip_spaces "$plaintext")" + else + if [[ "$cipher" =~ AES_128 ]]; then + ossl_cipher="-aes-128-ecb" + elif [[ "$cipher" =~ AES_256 ]]; then + ossl_cipher="-aes-256-ecb" + else + return 7 + fi + if [[ "$cipher" =~ CCM ]]; then + plaintext="$(ccm-gcm-decrypt "$ossl_cipher" "$key" "02${nonce}000001" "${ciphertext:0:ciphertext_len}")" + else # GCM + plaintext="$(ccm-gcm-decrypt "$ossl_cipher" "$key" "${nonce}00000002" "${ciphertext:0:ciphertext_len}")" + fi + fi + [[ $? -ne 0 ]] && return 7 + + tm_out "$plaintext" + return 0 +} + +# arg1: iv +# arg2: sequence number +get-nonce() { + local iv="$1" + local -i seq_num="$2" + local -i len lsb + local msb nonce + + len=${#iv} + [[ $len -lt 8 ]] && return 7 + i=$len-8 + msb="${iv:0:i}" + lsb="0x${iv:i:8}" + nonce="${msb}$(printf "%08X" "$((lsb ^ seq_num))")" + tm_out "$nonce" + return 0 +} + +# Return: +# 0 if arg1 contains the entire server response. +# 1 if arg1 does not contain the entire server response. +# 2 if the response is malformed. +# 3 if (a) the response version is TLSv1.3; +# (b) arg1 contains the entire ServerHello (and appears to contain the entire response); +# (c) the entire response is supposed to be parsed; and +# (d) the key and IV have not been provided to decrypt the response. +# arg1: ASCII-HEX encoded reply +# arg2: whether to process the full request ("all") or just the basic request plus the ephemeral key if any ("ephemeralkey"). 
+# arg3: TLS cipher for decrypting TLSv1.3 response +# arg4: key and IV for decrypting TLSv1.3 response +check_tls_serverhellodone() { + local tls_hello_ascii="$1" + local process_full="$2" + local cipher="$3" + local key_and_iv="$4" + local tls_handshake_ascii="" tls_alert_ascii="" + local -i i tls_hello_ascii_len tls_handshake_ascii_len tls_alert_ascii_len + local -i msg_len remaining tls_serverhello_ascii_len sid_len + local -i j offset tls_extensions_len extension_len + local tls_content_type tls_protocol tls_handshake_type tls_msg_type extension_type + local tls_err_level + local key iv + local -i seq_num=0 plaintext_len + local plaintext decrypted_response="" + + DETECTED_TLS_VERSION="" + + [[ -n "$key_and_iv" ]] && read -r key iv <<< "$key_and_iv" + + if [[ -z "$tls_hello_ascii" ]]; then + return 0 # no server hello received + fi + + tls_hello_ascii_len=${#tls_hello_ascii} + for (( i=0; i $TMPFILE + + [[ "$DEBUG" -ge 5 ]] && echo $tls_hello_ascii # one line without any blanks + + # Client messages, including handshake messages, are carried by the record layer. + # First, extract the handshake and alert messages. + # see https://en.wikipedia.org/wiki/Transport_Layer_Security-SSL#TLS_record + # byte 0: content type: 0x14=CCS, 0x15=TLS alert x16=Handshake, 0x17 Application, 0x18=HB + # byte 1+2: TLS version word, major is 03, minor 00=SSL3, 01=TLS1 02=TLS1.1 03=TLS 1.2 + # byte 3+4: fragment length + # bytes 5...: message fragment + tls_hello_ascii_len=${#tls_hello_ascii} + if [[ $DEBUG -ge 3 ]] && [[ $tls_hello_ascii_len -gt 0 ]]; then + echo "TLS message fragments:" + fi + for (( i=0; i/dev/null)" + [[ $DEBUG -ge 1 ]] && tmpfile_handle ${FUNCNAME[0]}.txt + return 4 + elif [[ "$tls_hello_ascii" =~ 6130303220 ]]; then + [[ $DEBUG -ge 2 ]] && printf "%s\n" "probably IMAP plaintext reply \"$(hex2ascii "${tls_hello_ascii:0:32}" 2>/dev/null)\"" + [[ $DEBUG -ge 1 ]] && tmpfile_handle ${FUNCNAME[0]}.txt + return 3 + fi + fi + if [[ $tls_content_type != 14 ]] && [[ $tls_content_type != 15 ]] && \ + [[ $tls_content_type != 16 ]] && [[ $tls_content_type != 17 ]]; then + debugme tmln_warning "Content type other than alert, handshake, change cipher spec, or application data detected." + [[ $DEBUG -ge 1 ]] && tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + elif [[ "${tls_protocol:0:2}" != 03 ]]; then + debugme tmln_warning "Protocol record_version.major is not 03." + [[ $DEBUG -ge 1 ]] && tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + DETECTED_TLS_VERSION=$tls_protocol + + if [[ $msg_len -gt $tls_hello_ascii_len-$i ]]; then + if [[ "$process_full" =~ all ]]; then + debugme tmln_warning "Malformed message." + [[ $DEBUG -ge 1 ]] && tmpfile_handle ${FUNCNAME[0]}.txt + return 7 + else + # This could just be a result of the server's response being split + # across two or more packets. Just grab the part that is available. + msg_len=$tls_hello_ascii_len-$i + fi + fi + + if [[ $tls_content_type == 16 ]]; then + tls_handshake_ascii="$tls_handshake_ascii${tls_hello_ascii:i:msg_len}" + elif [[ $tls_content_type == 15 ]]; then # TLS ALERT + tls_alert_ascii="$tls_alert_ascii${tls_hello_ascii:i:msg_len}" + fi + done + + # Now check the alert messages. + tls_alert_ascii_len=${#tls_alert_ascii} + if [[ "$process_full" =~ all ]] && [[ $tls_alert_ascii_len%4 -ne 0 ]]; then + debugme tmln_warning "Malformed message." 
+ [[ $DEBUG -ge 1 ]] && tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + + if [[ $tls_alert_ascii_len -gt 0 ]]; then + debugme echo "TLS alert messages:" + for (( i=0; i+3 < tls_alert_ascii_len; i=i+4 )); do + tls_err_level=${tls_alert_ascii:i:2} # 1: warning, 2: fatal + j=$i+2 + tls_err_descr_no=${tls_alert_ascii:j:2} + if [[ $DEBUG -ge 1 ]]; then + debugme tm_out " tls_err_descr_no: 0x${tls_err_descr_no} / = $(hex2dec ${tls_err_descr_no})" + tls_alert_descrip="$(tls_alert "$tls_err_descr_no")" + if [[ $DEBUG -ge 2 ]]; then + tmln_out " ($tls_alert_descrip)" + tm_out " tls_err_level: ${tls_err_level}" + fi + case $tls_err_level in + 01) echo -n "warning " >> $TMPFILE + debugme tmln_out " (warning)" ;; + 02) echo -n "fatal " >> $TMPFILE + debugme tmln_out " (fatal)" ;; + esac + echo "alert $tls_alert_descrip" >> $TMPFILE + echo "===============================================================================" >> $TMPFILE + fi + + if [[ "$tls_err_level" != 01 ]] && [[ "$tls_err_level" != 02 ]]; then + debugme tmln_warning "Unexpected AlertLevel (0x$tls_err_level)." + [[ $DEBUG -ge 1 ]] && tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + elif [[ "$tls_err_level" == 02 ]]; then + # Fatal alert + [[ $DEBUG -ge 1 ]] && tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + done + fi + + # Now extract just the server hello, certificate, certificate status, + # and server key exchange handshake messages. + tls_handshake_ascii_len=${#tls_handshake_ascii} + if [[ $DEBUG -ge 3 ]] && [[ $tls_handshake_ascii_len -gt 0 ]]; then + echo "TLS handshake messages:" + fi + for (( i=0; i/dev/null | hexdump -v -e '16/1 "%02X"')" + tls_certificate_ascii="${tls_certificate_ascii%%[!0-9A-F]*}" + if [[ ${#tls_certificate_ascii} -ne $tls_certificate_ascii_len ]]; then + debugme tmln_warning "Length of uncompressed certificates did not match specified length." + return 1 + fi + fi + fi + done + + if [[ $tls_serverhello_ascii_len -eq 0 ]]; then + debugme echo "server hello empty, TCP connection closed" + DETECTED_TLS_VERSION="closed TCP connection " + [[ $DEBUG -ge 1 ]] && tmpfile_handle ${FUNCNAME[0]}.txt + return 1 # no server hello received + elif [[ $tls_serverhello_ascii_len -lt 76 ]]; then + DETECTED_TLS_VERSION="reply malformed" + debugme echo "Malformed response" + [[ $DEBUG -ge 1 ]] && tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + elif [[ "${tls_handshake_ascii:0:2}" != 02 ]]; then + # the ServerHello MUST be the first handshake message + DETECTED_TLS_VERSION="reply contained no ServerHello" + debugme tmln_warning "The first handshake protocol message is not a ServerHello." + [[ $DEBUG -ge 1 ]] && tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + if [[ $DEBUG -eq 0 ]]; then + echo "CONNECTED(00000003)" > $TMPFILE + else + echo "CONNECTED(00000003)" >> $TMPFILE + fi + + # First parse the server hello handshake message + # byte 0+1: 03, TLS version word see byte 1+2 + # byte 2-5: TLS timestamp for OpenSSL <1.01f + # byte 6-33: random, 28 bytes + # byte 34: session id length + # byte 35+36+sid-len: cipher suite! + # byte 37+sid-len: compression method: 00: none, 01: deflate, 64: LZS + # byte 38+39+sid-len: extension length + tls_protocol2="${tls_serverhello_ascii:0:4}" + DETECTED_TLS_VERSION="$tls_protocol2" + [[ "${DETECTED_TLS_VERSION:0:2}" == 7F ]] && DETECTED_TLS_VERSION="0304" + if [[ "${DETECTED_TLS_VERSION:0:2}" != 03 ]]; then + debugme tmln_warning "server_version.major in ServerHello is not 03." 
+ [[ $DEBUG -ge 1 ]] && tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + + if [[ "0x${DETECTED_TLS_VERSION:2:2}" -le "0x03" ]]; then + tls_hello_time="${tls_serverhello_ascii:4:8}" + [[ "$TLS_DIFFTIME_SET" || "$DEBUG" ]] && TLS_TIME=$(hex2dec "$tls_hello_time") + tls_sid_len_hex="${tls_serverhello_ascii:68:2}" + tls_sid_len=2*$(hex2dec "$tls_sid_len_hex") + offset=$((tls_sid_len+70)) + if [[ $tls_serverhello_ascii_len -lt 76+$tls_sid_len ]]; then + debugme echo "Malformed response" + [[ $DEBUG -ge 1 ]] && tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + else + offset=68 + fi + + tls_cipher_suite="${tls_serverhello_ascii:offset:4}" + + if [[ "0x${DETECTED_TLS_VERSION:2:2}" -le "0x03" ]]; then + offset=$((tls_sid_len+74)) + tls_compression_method="${tls_serverhello_ascii:offset:2}" + extns_offset=$((tls_sid_len+76)) + else + extns_offset=72 + fi + + if [[ $tls_serverhello_ascii_len -gt $extns_offset ]] && \ + ( [[ "$process_full" =~ all ]] || [[ "$DETECTED_TLS_VERSION" == 0303 ]] || \ + ( [[ "$process_full" == ephemeralkey ]] && [[ "0x${DETECTED_TLS_VERSION:2:2}" -gt "0x03" ]] ) ); then + if [[ $tls_serverhello_ascii_len -lt $extns_offset+4 ]]; then + debugme echo "Malformed response" + [[ $DEBUG -ge 1 ]] && tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + tls_extensions_len=$(hex2dec "${tls_serverhello_ascii:extns_offset:4}")*2 + if [[ $tls_extensions_len -ne $tls_serverhello_ascii_len-$extns_offset-4 ]]; then + debugme tmln_warning "Malformed message." + [[ $DEBUG -ge 1 ]] && tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + for (( i=0; i> $TMPFILE + offset=$((extns_offset+12+i)) + len1=2*$(hex2dec "${tls_serverhello_ascii:offset:4}") + if [[ $extension_len -lt $len1+4 ]] || [[ $len1 -lt 4 ]]; then + debugme tmln_warning "Malformed supported groups extension." + return 1 + fi + offset=$((offset+4)) + for (( j=0; j < len1; j=j+4 )); do + [[ $j -ne 0 ]] && echo -n ", " >> $TMPFILE + case "${tls_serverhello_ascii:offset:4}" in + "0017") echo -n "secp256r1" >> $TMPFILE ;; + "0018") echo -n "secp384r1" >> $TMPFILE ;; + "0019") echo -n "secp521r1" >> $TMPFILE ;; + "001D") echo -n "X25519" >> $TMPFILE ;; + "001E") echo -n "X448" >> $TMPFILE ;; + "0100") echo -n "ffdhe2048" >> $TMPFILE ;; + "0101") echo -n "ffdhe3072" >> $TMPFILE ;; + "0102") echo -n "ffdhe4096" >> $TMPFILE ;; + "0103") echo -n "ffdhe6144" >> $TMPFILE ;; + "0104") echo -n "ffdhe8192" >> $TMPFILE ;; + *) echo -n "unknown (${tls_serverhello_ascii:offset:4})" >> $TMPFILE ;; + esac + offset=$((offset+4)) + done + echo "" >> $TMPFILE + fi + ;; + 000B) tls_extensions+="TLS server extension \"EC point formats\" (id=11), len=$extension_len\n" ;; + 000C) tls_extensions+="TLS server extension \"SRP\" (id=12), len=$extension_len\n" ;; + 000D) tls_extensions+="TLS server extension \"signature algorithms\" (id=13), len=$extension_len\n" ;; + 000E) tls_extensions+="TLS server extension \"use SRTP\" (id=14), len=$extension_len\n" ;; + 000F) tls_extensions+="TLS server extension \"heartbeat\" (id=15), len=$extension_len\n" ;; + 0010) tls_extensions+="TLS server extension \"application layer protocol negotiation\" (id=16), len=$extension_len\n" + if [[ "$process_full" =~ all ]]; then + if [[ $extension_len -lt 4 ]]; then + debugme echo "Malformed application layer protocol negotiation extension." 
+ [[ $DEBUG -ge 1 ]] && tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + echo -n "ALPN protocol: " >> $TMPFILE + offset=$((extns_offset+12+i)) + j=2*$(hex2dec "${tls_serverhello_ascii:offset:4}") + if [[ $extension_len -ne $j+4 ]] || [[ $j -lt 2 ]]; then + debugme echo "Malformed application layer protocol negotiation extension." + [[ $DEBUG -ge 1 ]] && tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + offset=$((offset+4)) + j=2*$(hex2dec "${tls_serverhello_ascii:offset:2}") + if [[ $extension_len -ne $j+6 ]]; then + debugme echo "Malformed application layer protocol negotiation extension." + [[ $DEBUG -ge 1 ]] && tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + offset=$((offset+2)) + asciihex_to_binary "${tls_serverhello_ascii:offset:j}" >> "$TMPFILE" + echo "" >> $TMPFILE + echo "===============================================================================" >> $TMPFILE + fi + ;; + 0011) tls_extensions+="TLS server extension \"certificate status version 2\" (id=17), len=$extension_len\n" ;; + 0012) tls_extensions+="TLS server extension \"signed certificate timestamps\" (id=18), len=$extension_len\n" ;; + 0013) tls_extensions+="TLS server extension \"client certificate type\" (id=19), len=$extension_len\n" ;; + 0014) tls_extensions+="TLS server extension \"server certificate type\" (id=20), len=$extension_len\n" ;; + 0015) tls_extensions+="TLS server extension \"TLS padding\" (id=21), len=$extension_len\n" ;; + 0016) tls_extensions+="TLS server extension \"encrypt-then-mac\" (id=22), len=$extension_len\n" ;; + 0017) tls_extensions+="TLS server extension \"extended master secret\" (id=23), len=$extension_len\n" ;; + 0018) tls_extensions+="TLS server extension \"token binding\" (id=24), len=$extension_len\n" ;; + 0019) tls_extensions+="TLS server extension \"cached info\" (id=25), len=$extension_len\n" ;; + 0023) tls_extensions+="TLS server extension \"session ticket\" (id=35), len=$extension_len\n" ;; + 0028|0033) + # The key share extension was renumbered from 40 to 51 in TLSv1.3 draft 23 since a few + # implementations have been using 40 for the extended_random extension. Since the + # server's version may not yet have been determined, assume that both values represent the + # key share extension. + if [[ "$extension_type" == "00$KEY_SHARE_EXTN_NR" ]]; then + tls_extensions+="TLS server extension \"key share\"" + else + tls_extensions+="TLS server extension \"unrecognized extension\"" + fi + if [[ "$extension_type" == 0028 ]]; then + tls_extensions+=" (id=40), len=$extension_len\n" + else + tls_extensions+=" (id=51), len=$extension_len\n" + fi + if [[ "$process_full" =~ all ]] || [[ "$process_full" == ephemeralkey ]]; then + if [[ $extension_len -lt 4 ]]; then + debugme tmln_warning "Malformed key share extension." + [[ $DEBUG -ge 1 ]] && tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + offset=$((extns_offset+12+i)) + named_curve=$(hex2dec "${tls_serverhello_ascii:offset:4}") + offset=$((extns_offset+16+i)) + msg_len=2*"$(hex2dec "${tls_serverhello_ascii:offset:4}")" + if [[ $msg_len -ne $extension_len-8 ]]; then + debugme tmln_warning "Malformed key share extension." 
+ [[ $DEBUG -ge 1 ]] && tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + case $named_curve in + 21) dh_bits=224 ; named_curve_str="P-224" ; named_curve_oid="06052b81040021" ;; + 23) dh_bits=256 ; named_curve_str="P-256" ; named_curve_oid="06082a8648ce3d030107" ;; + 24) dh_bits=384 ; named_curve_str="P-384" ; named_curve_oid="06052b81040022" ;; + 25) dh_bits=521 ; named_curve_str="P-521" ; named_curve_oid="06052b81040023" ;; + 29) dh_bits=253 ; named_curve_str="X25519" ;; + 30) dh_bits=448 ; named_curve_str="X448" ;; + 256) dh_bits=2048 ; named_curve_str="ffdhe2048" ;; + 257) dh_bits=3072 ; named_curve_str="ffdhe3072" ;; + 258) dh_bits=4096 ; named_curve_str="ffdhe4096" ;; + 259) dh_bits=6144 ; named_curve_str="ffdhe6144" ;; + 260) dh_bits=8192 ; named_curve_str="ffdhe8192" ;; + *) named_curve_str="" ; named_curve_oid="" ;; + esac + offset=$((extns_offset+20+i)) + if ! "$HAS_PKEY"; then + # The key can't be extracted without the pkey utility. + key_bitstring="" + elif [[ $named_curve -eq 29 ]]; then + key_bitstring="302a300506032b656e032100${tls_serverhello_ascii:offset:msg_len}" + elif [[ $named_curve -eq 30 ]]; then + key_bitstring="3042300506032b656f033900${tls_serverhello_ascii:offset:msg_len}" + elif [[ $named_curve -lt 256 ]] && [[ -n "$named_curve_oid" ]]; then + len1="$(printf "%02x" $((msg_len/2+1)))" + [[ "0x${len1}" -ge "0x80" ]] && len1="81${len1}" + key_bitstring="03${len1}00${tls_serverhello_ascii:offset:msg_len}" + len2="$(printf "%02x" $((${#named_curve_oid}/2+9)))" + len3="$(printf "%02x" $((${#named_curve_oid}/2+${#key_bitstring}/2+11)))" + [[ "0x${len3}" -ge "0x80" ]] && len3="81${len3}" + key_bitstring="30${len3}30${len2}06072a8648ce3d0201${named_curve_oid}${key_bitstring}" + elif [[ "$named_curve_str" =~ "ffdhe" ]] && [[ "${TLS13_KEY_SHARES[named_curve]}" =~ "BEGIN" ]]; then + dh_param="$($OPENSSL pkey -pubout -outform DER 2>>$ERRFILE <<< "${TLS13_KEY_SHARES[named_curve]}" | hexdump -v -e '16/1 "%02X"')" + + # First is the length of the public-key SEQUENCE, and it is always encoded in four bytes (3082xxxx) + # Next is the length of the parameters SEQUENCE, and it is also always encoded in four bytes (3082xxxx) + dh_param_len=8+2*"$(hex2dec "${dh_param:12:4}")" + dh_param="${dh_param:8:dh_param_len}" + if [[ "0x${tls_serverhello_ascii:offset:2}" -ge 0x80 ]]; then + key_bitstring="00${tls_serverhello_ascii:offset:msg_len}" + msg_len+=2 + else + key_bitstring="${tls_serverhello_ascii:offset:msg_len}" + fi + len1="$(printf "%04x" $((msg_len/2)))" + key_bitstring="0282${len1}$key_bitstring" + len1="$(printf "%04x" $((${#key_bitstring}/2+1)))" + key_bitstring="${dh_param}0382${len1}00$key_bitstring" + len1="$(printf "%04x" $((${#key_bitstring}/2)))" + key_bitstring="3082${len1}$key_bitstring" + fi + if [[ -n "$key_bitstring" ]]; then + key_bitstring="$(asciihex_to_binary "$key_bitstring" | $OPENSSL pkey -pubin -inform DER 2>$ERRFILE)" + if [[ -z "$key_bitstring" ]] && [[ $DEBUG -ge 2 ]]; then + if [[ -n "$named_curve_str" ]]; then + prln_warning "Your $OPENSSL doesn't support $named_curve_str" + else + prln_warning "Your $OPENSSL doesn't support named curve $named_curve" + fi + fi + fi + fi + ;; + 0029) tls_extensions+="TLS server extension \"pre-shared key\" (id=41), len=$extension_len\n" ;; + 002A) tls_extensions+="TLS server extension \"early data\" (id=42), len=$extension_len\n" ;; + 002B) tls_extensions+="TLS server extension \"supported versions\" (id=43), len=$extension_len\n" + if [[ $extension_len -ne 4 ]]; then + debugme tmln_warning "Malformed supported versions 
extension." + return 1 + fi + offset=$((extns_offset+12+i)) + tls_protocol2="${tls_serverhello_ascii:offset:4}" + DETECTED_TLS_VERSION="$tls_protocol2" + [[ "${DETECTED_TLS_VERSION:0:2}" == 7F ]] && DETECTED_TLS_VERSION="0304" + ;; + 002C) tls_extensions+="TLS server extension \"cookie\" (id=44), len=$extension_len\n" ;; + 002D) tls_extensions+="TLS server extension \"psk key exchange modes\" (id=45), len=$extension_len\n" ;; + 002E) tls_extensions+="TLS server extension \"ticket early data info\" (id=46), len=$extension_len\n" ;; + 002F) tls_extensions+="TLS server extension \"certificate authorities\" (id=47), len=$extension_len\n" ;; + 0030) tls_extensions+="TLS server extension \"oid filters\" (id=48), len=$extension_len\n" ;; + 0031) tls_extensions+="TLS server extension \"post handshake auth\" (id=49), len=$extension_len\n" ;; + 3374) tls_extensions+="TLS server extension \"next protocol\" (id=13172), len=$extension_len\n" + if [[ "$process_full" =~ all ]]; then + local -i protocol_len + echo -n "Protocols advertised by server: " >> $TMPFILE + offset=$((extns_offset+12+i)) + for (( j=0; j> "$TMPFILE" + offset=$((offset+protocol_len)) + [[ $j+$protocol_len+2 -lt $extension_len ]] && echo -n ", " >> $TMPFILE + done + echo "" >> $TMPFILE + echo "===============================================================================" >> $TMPFILE + fi + ;; + FF01) tls_extensions+="TLS server extension \"renegotiation info\" (id=65281), len=$extension_len\n" ;; + *) tls_extensions+="TLS server extension \"unrecognized extension\" (id=$(printf "%d\n\n" "0x$extension_type")), len=$extension_len\n" ;; + esac + # After processing all of the extensions in the ServerHello message, + # if it has been determined that the response is TLSv1.3 and the + # response was decrypted, then modify $tls_serverhello_ascii by adding + # the extensions from the EncryptedExtensions and Certificate messages + # and then process them. + if ! "$added_encrypted_extensions" && [[ "$DETECTED_TLS_VERSION" == "0304" ]] && \ + [[ $((i+8+extension_len)) -eq $tls_extensions_len ]]; then + # Note that the encrypted extensions have been added so that + # the aren't added a second time. + added_encrypted_extensions=true + if [[ -n "$tls_encryptedextensions_ascii" ]]; then + tls_serverhello_ascii_len+=$tls_encryptedextensions_ascii_len-4 + tls_extensions_len+=$tls_encryptedextensions_ascii_len-4 + tls_encryptedextensions_ascii_len=$tls_encryptedextensions_ascii_len/2-2 + offset=$((extns_offset+4)) + tls_serverhello_ascii="${tls_serverhello_ascii:0:extns_offset}$(printf "%04X" $((0x${tls_serverhello_ascii:extns_offset:4}+$tls_encryptedextensions_ascii_len)))${tls_serverhello_ascii:offset}${tls_encryptedextensions_ascii:4}" + fi + if [[ -n "$tls_certificate_ascii" ]]; then + # In TLS 1.3, the Certificate message begins with a zero length certificate_request_context. + # In addition, certificate_list is now a list of (certificate, extension) pairs rather than + # just certificates. So, extract the extensions and add them to $tls_serverhello_ascii and + # create a new $tls_certificate_ascii that only contains a list of certificates. + if [[ -n "$tls_certificate_ascii" ]]; then + if [[ "${tls_certificate_ascii:0:2}" != "00" ]]; then + debugme tmln_warning "Malformed Certificate Handshake message in ServerHello." + tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + if [[ $tls_certificate_ascii_len -lt 8 ]]; then + debugme tmln_warning "Malformed Certificate Handshake message in ServerHello." 
+ tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + certificate_list_len=2*$(hex2dec "${tls_certificate_ascii:2:6}") + if [[ $certificate_list_len -ne $tls_certificate_ascii_len-8 ]]; then + debugme tmln_warning "Malformed Certificate Handshake message in ServerHello." + tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + for (( j=8; j < tls_certificate_ascii_len; j=j+extn_len )); do + if [[ $tls_certificate_ascii_len-$j -lt 6 ]]; then + debugme tmln_warning "Malformed Certificate Handshake message in ServerHello." + tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + certificate_len=2*$(hex2dec "${tls_certificate_ascii:j:6}") + if [[ $certificate_len -gt $tls_certificate_ascii_len-$j-6 ]]; then + debugme tmln_warning "Malformed Certificate Handshake message in ServerHello." + tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + len1=$certificate_len+6 + tls_revised_certificate_msg+="${tls_certificate_ascii:j:len1}" + j+=$len1 + extn_len=2*$(hex2dec "${tls_certificate_ascii:j:4}") + j+=4 + # TODO: Should only the extensions associated with the EE certificate be added to $tls_serverhello_ascii? + tls_serverhello_ascii_len+=$extn_len + tls_extensions_len+=$extn_len + offset=$((extns_offset+4)) + tls_serverhello_ascii="${tls_serverhello_ascii:0:extns_offset}$(printf "%04X" $(( 0x${tls_serverhello_ascii:extns_offset:4}+extn_len/2)) )${tls_serverhello_ascii:offset}${tls_certificate_ascii:j:extn_len}" + done + tls_certificate_ascii_len=${#tls_revised_certificate_msg}+6 + tls_certificate_ascii="$(printf "%06X" $(( tls_certificate_ascii_len/2-3)) )$tls_revised_certificate_msg" + fi + fi + fi + done + fi + + if [[ "$DETECTED_TLS_VERSION" == "0300" ]]; then + echo "Protocol : SSLv3" >> $TMPFILE + else + echo "Protocol : TLSv1.$((0x$DETECTED_TLS_VERSION-0x0301))" >> $TMPFILE + fi + echo "===============================================================================" >> $TMPFILE + if [[ $TLS_NR_CIPHERS -ne 0 ]]; then + if [[ "${tls_cipher_suite:0:2}" == "00" ]]; then + rfc_cipher_suite="$(show_rfc_style "x${tls_cipher_suite:2:2}")" + else + rfc_cipher_suite="$(show_rfc_style "x${tls_cipher_suite:0:4}")" + fi + elif "$HAS_CIPHERSUITES"; then + rfc_cipher_suite="$($OPENSSL ciphers -V -ciphersuites "$TLS13_OSSL_CIPHERS" 'ALL:COMPLEMENTOFALL' | grep -i " 0x${tls_cipher_suite:0:2},0x${tls_cipher_suite:2:2} " | awk '{ print $3 }')" + else + rfc_cipher_suite="$($OPENSSL ciphers -V 'ALL:COMPLEMENTOFALL' | grep -i " 0x${tls_cipher_suite:0:2},0x${tls_cipher_suite:2:2} " | awk '{ print $3 }')" + fi + echo "Cipher : $rfc_cipher_suite" >> $TMPFILE + if [[ $dh_bits -ne 0 ]]; then + if [[ "$named_curve_str" =~ "ffdhe" ]]; then + echo "Server Temp Key: DH, $named_curve_str, $dh_bits bits" >> $TMPFILE + elif [[ "$named_curve_str" == "X25519" ]] || [[ "$named_curve_str" == "X448" ]]; then + echo "Server Temp Key: $named_curve_str, $dh_bits bits" >> $TMPFILE + else + echo "Server Temp Key: ECDH, $named_curve_str, $dh_bits bits" >> $TMPFILE + fi + fi + if [[ -n "$key_bitstring" ]]; then + echo "$key_bitstring" >> $TMPFILE + [[ "${TLS13_KEY_SHARES[named_curve]}" =~ "BEGIN" ]] && \ + echo "${TLS13_KEY_SHARES[named_curve]}" >> $TMPFILE + fi + echo "===============================================================================" >> $TMPFILE + if [[ "0x${DETECTED_TLS_VERSION:2:2}" -le "0x03" ]]; then + case $tls_compression_method in + 00) echo "Compression: NONE" >> $TMPFILE ;; + 01) echo "Compression: zlib compression" >> $TMPFILE ;; + 40) echo "Compression: LZS compression" >> $TMPFILE ;; + *) echo "Compression: 
unrecognized compression method" >> $TMPFILE ;; + esac + echo "===============================================================================" >> $TMPFILE + fi + [[ -n "$tls_extensions" ]] && echo -e "$tls_extensions" >> $TMPFILE + + if [[ $DEBUG -ge 3 ]]; then + echo "TLS server hello message:" + if [[ $DEBUG -ge 4 ]]; then + echo " tls_protocol: 0x$tls_protocol2" + [[ "0x${DETECTED_TLS_VERSION:2:2}" -le "0x03" ]] && echo " tls_sid_len: 0x$tls_sid_len_hex / = $((tls_sid_len/2))" + fi + if [[ "0x${DETECTED_TLS_VERSION:2:2}" -le "0x03" ]]; then + echo -n " tls_hello_time: 0x$tls_hello_time " + parse_date "$TLS_TIME" "+%Y-%m-%d %r" "%s" # in debugging mode we don't mind the cycles and don't use TLS_DIFFTIME_SET + fi + echo -n " tls_cipher_suite: 0x$tls_cipher_suite" + if [[ -n "$rfc_cipher_suite" ]]; then + echo " ($rfc_cipher_suite)" + else + echo "" + fi + if [[ $dh_bits -ne 0 ]]; then + if [[ "$named_curve_str" =~ "ffdhe" ]]; then + echo " dh_bits: DH, $named_curve_str, $dh_bits bits" + elif [[ "$named_curve_str" == "X25519" ]] || [[ "$named_curve_str" == "X448" ]]; then + echo " dh_bits: $named_curve_str, $dh_bits bits" + else + echo " dh_bits: ECDH, $named_curve_str, $dh_bits bits" + fi + fi + if [[ "0x${DETECTED_TLS_VERSION:2:2}" -le "0x03" ]]; then + echo -n " tls_compression_method: 0x$tls_compression_method " + case $tls_compression_method in + 00) echo "(NONE)" ;; + 01) echo "(zlib compression)" ;; + 40) echo "(LZS compression)" ;; + *) echo "(unrecognized compression method)" ;; + esac + fi + if [[ -n "$tls_extensions" ]]; then + echo -n " tls_extensions: " + newline_to_spaces "$(grep -a 'TLS server extension ' $TMPFILE | \ + sed -e 's/TLS server extension //g' -e 's/\" (id=/\/#/g' \ + -e 's/,.*$/,/g' -e 's/),$/\"/g' \ + -e 's/elliptic curves\/#10/supported_groups\/#10/g')" + echo "" + if [[ "$tls_extensions" =~ supported_groups ]]; then + echo " Supported Groups: $(grep "Supported groups:" "$TMPFILE" | sed 's/Supported groups: //')" + fi + if [[ "$tls_extensions" =~ application\ layer\ protocol\ negotiation ]]; then + echo " ALPN protocol: $(grep "ALPN protocol:" "$TMPFILE" | sed 's/ALPN protocol: //')" + fi + if [[ "$tls_extensions" =~ next\ protocol ]]; then + echo " NPN protocols: $(grep "Protocols advertised by server:" "$TMPFILE" | sed 's/Protocols advertised by server: //')" + fi + fi + tmln_out + fi + + # If a CIPHER_SUITES string was provided, then check that $tls_cipher_suite is in the string. + # this appeared in yassl + MySQL (https://github.com/drwetter/testssl.sh/pull/784) but adds robustness + # to the implementation + if [[ -n "$cipherlist" ]]; then + tls_cipher_suite="$(tolower "$tls_cipher_suite")" + tls_cipher_suite="${tls_cipher_suite:0:2}\\x${tls_cipher_suite:2:2}" + cipherlist_len=${#cipherlist} + for (( i=0; i < cipherlist_len; i=i+8 )); do + # At the right hand side we need the quotes here! + [[ "${cipherlist:i:6}" == "$tls_cipher_suite" ]] && break + done + if [[ $i -ge $cipherlist_len ]]; then + BAD_SERVER_HELLO_CIPHER=true + debugme echo "The ServerHello specifies a cipher suite that wasn't included in the ClientHello." + tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + fi + + # If the ClientHello included a supported_versions extension, then check that the + # $DETECTED_TLS_VERSION appeared in the list offered in the ClientHello. 
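+     # The offsets below index into the hex encoding of the ClientHello handshake message:
+     # byte 0 = handshake type (01), bytes 1-3 = length, bytes 4-5 = legacy version,
+     # bytes 6-37 = random (32 bytes), byte 38 = session id length. Hex position 76 is
+     # therefore the session id length, and 78+2*sid_len the start of the two-byte
+     # cipher suite list length.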
+ if [[ "${TLS_CLIENT_HELLO:0:2}" == 01 ]]; then + # get position of cipher lists (just after session id) + offset=78+2*$(hex2dec "${TLS_CLIENT_HELLO:76:2}") + # get position of compression methods + offset+=4+2*$(hex2dec "${TLS_CLIENT_HELLO:offset:4}") + # get position of extensions + extns_offset=$offset+6+2*$(hex2dec "${TLS_CLIENT_HELLO:offset:2}") + len1=${#TLS_CLIENT_HELLO} + for (( i=extns_offset; i < len1; i=i+8+extension_len )); do + extension_type="${TLS_CLIENT_HELLO:i:4}" + offset=4+$i + extension_len=2*$(hex2dec "${TLS_CLIENT_HELLO:offset:4}") + if [[ "$extension_type" == 002b ]]; then + offset+=6 + tls_protocol2="$(tolower "$tls_protocol2")" + for (( j=0; j < extension_len-2; j=j+4 )); do + [[ "${TLS_CLIENT_HELLO:offset:4}" == $tls_protocol2 ]] && break + offset+=4 + done + if [[ $j -eq $extension_len-2 ]]; then + debugme echo "The ServerHello specifies a version that wasn't offered in the ClientHello." + tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + break + fi + done + fi + + # Now parse the Certificate message. + if [[ "$process_full" =~ all ]]; then + # not sure why we need this + [[ -e "$HOSTCERT" ]] && rm "$HOSTCERT" + [[ -e "$TEMPDIR/intermediatecerts.pem" ]] && > "$TEMPDIR/intermediatecerts.pem" + fi + if [[ $tls_certificate_ascii_len -ne 0 ]]; then + # The first certificate is the server's certificate. If there are anything + # subsequent certificates, they are intermediate certificates. + if [[ $tls_certificate_ascii_len -lt 12 ]]; then + debugme echo "Malformed Certificate Handshake message in ServerHello." + tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + certificate_list_len=2*$(hex2dec "${tls_certificate_ascii:0:6}") + if [[ $certificate_list_len -ne $tls_certificate_ascii_len-6 ]]; then + debugme echo "Malformed Certificate Handshake message in ServerHello." + tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + + # Place server's certificate in $HOSTCERT + certificate_len=2*$(hex2dec "${tls_certificate_ascii:6:6}") + if [[ $certificate_len -gt $tls_certificate_ascii_len-12 ]]; then + debugme echo "Malformed Certificate Handshake message in ServerHello." + tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + asciihex_to_binary "${tls_certificate_ascii:12:certificate_len}" | \ + $OPENSSL x509 -inform DER -outform PEM -out "$HOSTCERT" 2>$ERRFILE + if [[ $? -ne 0 ]]; then + debugme echo "Malformed certificate in Certificate Handshake message in ServerHello." + tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + get_pub_key_size + echo "===============================================================================" >> $TMPFILE + echo "---" >> $TMPFILE + echo "Certificate chain" >> $TMPFILE + subjectDN="$($OPENSSL x509 -in $HOSTCERT -noout -subject 2>>$ERRFILE)" + issuerDN="$($OPENSSL x509 -in $HOSTCERT -noout -issuer 2>>$ERRFILE)" + echo " $nr_certs s:${subjectDN:9}" >> $TMPFILE + echo " i:${issuerDN:8}" >> $TMPFILE + cat "$HOSTCERT" >> $TMPFILE + + echo "" > "$TEMPDIR/intermediatecerts.pem" + # Place any additional certificates in $TEMPDIR/intermediatecerts.pem + CERTIFICATE_LIST_ORDERING_PROBLEM=false + CAissuerDN="$issuerDN" + for (( i=12+certificate_len; i$ERRFILE)" + if [[ $? -ne 0 ]]; then + debugme echo "Malformed certificate in Certificate Handshake message in ServerHello." + tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + nr_certs+=1 + CAsubjectDN="$($OPENSSL x509 -noout -subject 2>>$ERRFILE <<< "$pem_certificate")" + # Check that this certificate certifies the one immediately preceding it. 
+ [[ "${CAsubjectDN:9}" != "${CAissuerDN:8}" ]] && CERTIFICATE_LIST_ORDERING_PROBLEM=true + CAissuerDN="$($OPENSSL x509 -noout -issuer 2>>$ERRFILE <<< "$pem_certificate")" + echo " $nr_certs s:${CAsubjectDN:9}" >> $TMPFILE + echo " i:${CAissuerDN:8}" >> $TMPFILE + echo "$pem_certificate" >> $TMPFILE + echo "$pem_certificate" >> "$TEMPDIR/intermediatecerts.pem" + if [[ -z "$hostcert_issuer" ]] && [[ "${CAsubjectDN:9}" == "${issuerDN:8}" ]]; then + # The issuer's certificate is needed if there is a stapled OCSP response, + # and it may be needed if check_revocation_ocsp() will later be called + # with the OCSP URI in the server's certificate. + hostcert_issuer="$TEMPDIR/hostcert_issuer.pem" + echo "$pem_certificate" > "$hostcert_issuer" + fi + done + echo "---" >> $TMPFILE + echo "Server certificate" >> $TMPFILE + echo "subject=${subjectDN:9}" >> $TMPFILE + echo "issuer=${issuerDN:8}" >> $TMPFILE + echo "---" >> $TMPFILE + fi + + # Now parse the certificate status message + if [[ $tls_certificate_status_ascii_len -ne 0 ]] && [[ $tls_certificate_status_ascii_len -lt 8 ]]; then + debugme echo "Malformed certificate status Handshake message in ServerHello." + tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + elif [[ $tls_certificate_status_ascii_len -ne 0 ]] && [[ "${tls_certificate_status_ascii:0:2}" == "01" ]]; then + # This is a certificate status message of type "ocsp" + ocsp_response_len=2*$(hex2dec "${tls_certificate_status_ascii:2:6}") + if [[ $ocsp_response_len -ne $tls_certificate_status_ascii_len-8 ]]; then + debugme echo "Malformed certificate status Handshake message in ServerHello." + tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + ocsp_resp_offset=8 + elif [[ $tls_certificate_status_ascii_len -ne 0 ]] && [[ "${tls_certificate_status_ascii:0:2}" == "02" ]]; then + # This is a list of OCSP responses, but only the first one is needed + # since the first one corresponds to the server's certificate. + ocsp_response_list_len=2*$(hex2dec "${tls_certificate_status_ascii:2:6}") + if [[ $ocsp_response_list_len -ne $tls_certificate_status_ascii_len-8 ]] || [[ $ocsp_response_list_len -lt 6 ]]; then + debugme echo "Malformed certificate status Handshake message in ServerHello." + tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + ocsp_response_len=2*$(hex2dec "${tls_certificate_status_ascii:8:6}") + if [[ $ocsp_response_len -gt $ocsp_response_list_len-6 ]]; then + debugme echo "Malformed certificate status Handshake message in ServerHello." 
+ tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + ocsp_resp_offset=14 + fi + STAPLED_OCSP_RESPONSE="" + if [[ $ocsp_response_len -ne 0 ]]; then + STAPLED_OCSP_RESPONSE="${tls_certificate_status_ascii:ocsp_resp_offset:ocsp_response_len}" + echo "OCSP response:" >> $TMPFILE + echo "===============================================================================" >> $TMPFILE + if [[ -n "$hostcert_issuer" ]]; then + asciihex_to_binary "$STAPLED_OCSP_RESPONSE" | \ + $OPENSSL ocsp -no_nonce -CAfile $TEMPDIR/intermediatecerts.pem -issuer $hostcert_issuer -cert $HOSTCERT -respin /dev/stdin -resp_text >> $TMPFILE 2>$ERRFILE + else + asciihex_to_binary "$STAPLED_OCSP_RESPONSE" | \ + $OPENSSL ocsp -respin /dev/stdin -resp_text >> $TMPFILE 2>$ERRFILE + fi + echo "===============================================================================" >> $TMPFILE + elif [[ "$process_full" =~ all ]]; then + echo "OCSP response: no response sent" >> $TMPFILE + echo "===============================================================================" >> $TMPFILE + fi + + # Now parse the server key exchange message + if [[ $tls_serverkeyexchange_ascii_len -ne 0 ]]; then + if [[ $rfc_cipher_suite =~ TLS_ECDHE_ ]] || [[ $rfc_cipher_suite =~ TLS_ECDH_anon ]] || \ + [[ $rfc_cipher_suite == ECDHE* ]] || [[ $rfc_cipher_suite == AECDH* ]]; then + if [[ $tls_serverkeyexchange_ascii_len -lt 6 ]]; then + debugme echo "Malformed ServerKeyExchange Handshake message in ServerHello." + tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + curve_type=$(hex2dec "${tls_serverkeyexchange_ascii:0:2}") + if [[ $curve_type -eq 3 ]]; then + # named_curve - the curve is identified by a 2-byte number + named_curve=$(hex2dec "${tls_serverkeyexchange_ascii:2:4}") + # https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-8 + case $named_curve in + 1) dh_bits=163 ; named_curve_str="K-163" ;; + 2) dh_bits=162 ; named_curve_str="sect163r1" ;; + 3) dh_bits=163 ; named_curve_str="B-163" ;; + 4) dh_bits=193 ; named_curve_str="sect193r1" ;; + 5) dh_bits=193 ; named_curve_str="sect193r2" ;; + 6) dh_bits=232 ; named_curve_str="K-233" ;; + 7) dh_bits=233 ; named_curve_str="B-233" ;; + 8) dh_bits=238 ; named_curve_str="sect239k1" ;; + 9) dh_bits=281 ; named_curve_str="K-283" ;; + 10) dh_bits=282 ; named_curve_str="B-283" ;; + 11) dh_bits=407 ; named_curve_str="K-409" ;; + 12) dh_bits=409 ; named_curve_str="B-409" ;; + 13) dh_bits=570 ; named_curve_str="K-571" ;; + 14) dh_bits=570 ; named_curve_str="B-571" ;; + 15) dh_bits=161 ; named_curve_str="secp160k1" ;; + 16) dh_bits=161 ; named_curve_str="secp160r1" ;; + 17) dh_bits=161 ; named_curve_str="secp160r2" ;; + 18) dh_bits=192 ; named_curve_str="secp192k1" ;; + 19) dh_bits=192 ; named_curve_str="P-192" ;; + 20) dh_bits=225 ; named_curve_str="secp224k1" ;; + 21) dh_bits=224 ; named_curve_str="P-224" ;; + 22) dh_bits=256 ; named_curve_str="secp256k1" ;; + 23) dh_bits=256 ; named_curve_str="P-256" ;; + 24) dh_bits=384 ; named_curve_str="P-384" ;; + 25) dh_bits=521 ; named_curve_str="P-521" ;; + 26) dh_bits=256 ; named_curve_str="brainpoolP256r1" ;; + 27) dh_bits=384 ; named_curve_str="brainpoolP384r1" ;; + 28) dh_bits=512 ; named_curve_str="brainpoolP512r1" ;; + 29) dh_bits=253 ; named_curve_str="X25519" ;; + 30) dh_bits=448 ; named_curve_str="X448" ;; + esac + fi + if [[ $dh_bits -ne 0 ]] && [[ $named_curve -ne 29 ]] && [[ $named_curve -ne 30 ]]; then + [[ $DEBUG -ge 3 ]] && echo -e " dh_bits: ECDH, $named_curve_str, $dh_bits bits\n" + echo "Server Temp Key: ECDH, 
$named_curve_str, $dh_bits bits" >> $TMPFILE + elif [[ $dh_bits -ne 0 ]]; then + [[ $DEBUG -ge 3 ]] && echo -e " dh_bits: $named_curve_str, $dh_bits bits\n" + echo "Server Temp Key: $named_curve_str, $dh_bits bits" >> $TMPFILE + fi + elif [[ $rfc_cipher_suite =~ TLS_DHE_ ]] || [[ $rfc_cipher_suite =~ TLS_DH_anon ]] || \ + [[ $rfc_cipher_suite == "DHE-"* ]] || [[ $rfc_cipher_suite == "EDH-"* ]] || \ + [[ $rfc_cipher_suite == "EXP1024-DHE-"* ]]; then + # For DH ephemeral keys the first field is p, and the length of + # p is the same as the length of the public key. + if [[ $tls_serverkeyexchange_ascii_len -lt 4 ]]; then + debugme echo "Malformed ServerKeyExchange Handshake message in ServerHello." + tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + dh_p_len=2*$(hex2dec "${tls_serverkeyexchange_ascii:0:4}") + offset=4+$dh_p_len + if [[ $tls_serverkeyexchange_ascii_len -lt $offset ]]; then + debugme echo "Malformed ServerKeyExchange Handshake message in ServerHello." + tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + + # Subtract any leading 0 bytes + for (( i=4; i < offset; i=i+2 )); do + [[ "${tls_serverkeyexchange_ascii:i:2}" != "00" ]] && break + dh_p_len=$dh_p_len-2 + done + if [[ $i -ge $offset ]]; then + debugme echo "Malformed ServerKeyExchange Handshake message in ServerHello." + tmpfile_handle ${FUNCNAME[0]}.txt + return 1 + fi + dh_p="${tls_serverkeyexchange_ascii:i:dh_p_len}" + + dh_bits=4*$dh_p_len + msb=$(hex2dec "${tls_serverkeyexchange_ascii:i:2}") + for (( mask=128; msb < mask; mask/=2 )); do + dh_bits=$dh_bits-1 + done + + key_bitstring="$(get_dh_ephemeralkey "$tls_serverkeyexchange_ascii")" + [[ $? -eq 0 ]] && echo "$key_bitstring" >> $TMPFILE + + # Check to see whether the ephemeral public key uses one of the groups from + # RFC 7919 for parameters + case $dh_bits in + 2048) named_curve=256; named_curve_str=" ffdhe2048," ;; + 3072) named_curve=257; named_curve_str=" ffdhe3072," ;; + 4096) named_curve=258; named_curve_str=" ffdhe4096," ;; + 6144) named_curve=259; named_curve_str=" ffdhe6144," ;; + 8192) named_curve=260; named_curve_str=" ffdhe8192," ;; + *) named_curve=0; named_curve_str="" ;; + esac + [[ -z "$key_bitstring" ]] && named_curve=0 && named_curve_str="" + if "$HAS_PKEY" && [[ $named_curve -ne 0 ]] && [[ "${TLS13_KEY_SHARES[named_curve]}" =~ BEGIN ]]; then + ephemeral_param="$($OPENSSL pkey -pubin -text -noout 2>>$ERRFILE <<< "$key_bitstring" | grep -EA 1000 "prime:|prime P:")" + rfc7919_param="$($OPENSSL pkey -text -noout 2>>$ERRFILE <<< "${TLS13_KEY_SHARES[named_curve]}" | grep -EA 1000 "prime:|prime P:")" + [[ "$ephemeral_param" != "$rfc7919_param" ]] && named_curve_str="" + fi + + [[ $DEBUG -ge 3 ]] && [[ $dh_bits -ne 0 ]] && echo -e " dh_bits: DH,$named_curve_str $dh_bits bits\n" + [[ $dh_bits -ne 0 ]] && echo "Server Temp Key: DH,$named_curve_str $dh_bits bits" >> $TMPFILE + fi + fi + tmpfile_handle ${FUNCNAME[0]}.txt + + TLS_SERVER_HELLO="02$(printf "%06x" $(( tls_serverhello_ascii_len/2)) )${tls_serverhello_ascii}" + return 0 +} + + +#arg1 (optional): list of ciphers suites or empty +#arg2 (optional): "true" if full server response should be parsed. 
+# return: 6: couldn't open socket, 3(!): sslv2 handshake succeeded, 0=no SSLv2 +# 1,4,6,7: see return value of parse_sslv2_serverhello() +sslv2_sockets() { + local ret + local client_hello cipher_suites len_client_hello + local len_ciph_suites_byte len_ciph_suites + local server_hello sock_reply_file2 + local -i response_len server_hello_len + local parse_complete=false + + # this could be empty so swe use '==' + if [[ "$2" == true ]]; then + parse_complete=true + fi + if [[ -n "$1" ]]; then + cipher_suites="$1" + else + cipher_suites=" + 05,00,80, # 1st cipher 9 cipher specs, only classical V2 ciphers are used here, see FIXME below + 03,00,80, # 2nd there are v3 in v2!!! : https://tools.ietf.org/html/rfc6101#appendix-E + 01,00,80, # 3rd Cipher specifications introduced in version 3.0 can be included in version 2.0 client hello messages using + 07,00,c0, # 4th the syntax below. [..] # V2CipherSpec (see Version 3.0 name) = { 0x00, CipherSuite }; !!!! + 08,00,80, # 5th + 06,00,40, # 6th + 04,00,80, # 7th + 02,00,80, # 8th + 06,01,40, # 9 + 07,01,c0, # 10 + FF,80,00, # 11 + FF,80,10, # 12 + 00,00,00" # 13 + # FIXME: http://max.euston.net/d/tip_sslciphers.html <-- also SSLv3 ciphers + fi + + code2network "$cipher_suites" # convert CIPHER_SUITES + cipher_suites="$NW_STR" # we don't have the leading \x here so string length is two byte less, see next + len_ciph_suites_byte=${#cipher_suites} + + let "len_ciph_suites_byte += 2" + len_ciph_suites=$(printf "%02x\n" $(( len_ciph_suites_byte / 4 ))) + len_client_hello=$(printf "%02x\n" $((0x$len_ciph_suites + 0x19))) + + client_hello=" + ,80,$len_client_hello # length + ,01 # Client Hello + ,00,02 # SSLv2 + ,00,$len_ciph_suites # cipher spec length + ,00,00 # session ID length + ,00,10 # challenge length + ,$cipher_suites + ,29,22,be,b3,5a,01,8b,04,fe,5f,80,03,a0,13,eb,c4" # Challenge + # https://idea.popcount.org/2012-06-16-dissecting-ssl-handshake/ (client) + + fd_socket 5 || return 6 + debugme echo -n "sending client hello... " + socksend_clienthello "$client_hello" + + sockread_serverhello 32768 + if "$parse_complete"; then + server_hello=$(hexdump -v -e '16/1 "%02X"' "$SOCK_REPLY_FILE") + server_hello_len=2+$(hex2dec "${server_hello:1:3}") + response_len=$(wc -c "$SOCK_REPLY_FILE" | awk '{ print $1 }') + for (( 1; response_len < server_hello_len; 1 )); do + sock_reply_file2=${SOCK_REPLY_FILE}.2 + mv "$SOCK_REPLY_FILE" "$sock_reply_file2" + + debugme echo -n "requesting more server hello data... " + socksend "" $USLEEP_SND + sockread_serverhello 32768 + + [[ ! -s "$SOCK_REPLY_FILE" ]] && break + cat "$SOCK_REPLY_FILE" >> "$sock_reply_file2" + mv "$sock_reply_file2" "$SOCK_REPLY_FILE" + response_len=$(wc -c "$SOCK_REPLY_FILE" | awk '{ print $1 }') + done + fi + debugme echo "reading server hello... " + if [[ "$DEBUG" -ge 4 ]]; then + hexdump -C "$SOCK_REPLY_FILE" | head -6 + tmln_out + fi + + parse_sslv2_serverhello "$SOCK_REPLY_FILE" "$parse_complete" + ret=$? + + close_socket + tmpfile_handle ${FUNCNAME[0]}.dd $SOCK_REPLY_FILE + return $ret +} + +# arg1: supported groups extension +# arg2: "all" - process full response (including Certificate and certificate_status handshake messages) +# "ephemeralkey" - extract the server's ephemeral key (if any) +# Given the supported groups extension, create a key_share extension that includes a key share for +# each group listed in the supported groups extension. 
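# A hand-written illustration (not from the original script): a supported groups extension that
# advertises only X25519 arrives as  00,0a, 00,04, 00,02, 00,1d  -- type 0x000a, 4 data bytes,
# a 2-byte group list, group 0x001d. For that input the function would emit a single key share
# entry: group 00,1d, key length 00,20, followed by a 32-byte X25519 public key, all wrapped in
# an extension whose number is taken from $KEY_SHARE_EXTN_NR (0x33 for the final TLS 1.3 code
# point, another value for the old drafts).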
+generate_key_share_extension() { + local supported_groups + local -i i len supported_groups_len group + local extn_len list_len + local key_share key_shares="" + local -i nr_key_shares=0 + + supported_groups="${1//\\x/}" + [[ "${supported_groups:0:4}" != "000a" ]] && return 1 + + supported_groups_len=${#supported_groups} + [[ $supported_groups_len -lt 16 ]] && return 1 + + len=2*$(hex2dec "${supported_groups:4:4}") + [[ $len+8 -ne $supported_groups_len ]] && return 1 + + len=2*$(hex2dec "${supported_groups:8:4}") + [[ $len+12 -ne $supported_groups_len ]] && return 1 + + for (( i=12; i=0; i=i-1 )); do + if [[ 0x$i -eq 4 ]]; then + # FIXME: The ClientHello currently advertises support for various + # draft versions of TLSv1.3. Eventually it should only adversize + # support for the final version (0304). + if [[ "$KEY_SHARE_EXTN_NR" == 33 ]]; then + extension_supported_versions+=", 03, 04, 7f, 1c, 7f, 1b, 7f, 1a, 7f, 19, 7f, 18, 7f, 17" + else + extension_supported_versions+=", 7f, 16, 7f, 15, 7f, 14, 7f, 13, 7f, 12" + fi + else + extension_supported_versions+=", 03, $(printf "%02x" $i)" + fi + done + [[ -n "$all_extensions" ]] && all_extensions+="," + # FIXME: Adjust the lengths ("+15" and "+14") when the draft versions of TLSv1.3 are removed. + if [[ "$KEY_SHARE_EXTN_NR" == "33" ]]; then + all_extensions+="00, 2b, 00, $(printf "%02x" $((2*0x$tls_low_byte+15))), $(printf "%02x" $((2*0x$tls_low_byte+14)))$extension_supported_versions" + else + all_extensions+="00, 2b, 00, $(printf "%02x" $((2*0x$tls_low_byte+11))), $(printf "%02x" $((2*0x$tls_low_byte+10)))$extension_supported_versions" + fi + fi + + # There does not seem to be any reason to include this extension. However, it appears that + # OpenSSL, Firefox, and Chrome include it in TLS 1.3 ClientHello messages, and there is at + # least one server that will fail the connection if it is absent + # (see https://github.com/drwetter/testssl.sh/issues/990). + if [[ "0x$tls_low_byte" -ge 0x04 ]] && [[ ! "$extra_extensions_list" =~ " 002d " ]]; then + [[ -n "$all_extensions" ]] && all_extensions+="," + all_extensions+="$extn_psk_mode" + fi + + if [[ ! "$extra_extensions_list" =~ " 0023 " ]]; then + [[ -n "$all_extensions" ]] && all_extensions+="," + all_extensions+="$extension_session_ticket" + fi + + # If the ClientHello will include the ALPN extension, then don't include the NPN extension. + if [[ ! "$extra_extensions_list" =~ " 3374 " ]] && [[ ! "$extra_extensions_list" =~ " 0010 " ]]; then + [[ -n "$all_extensions" ]] && all_extensions+="," + all_extensions+="$extension_next_protocol" + fi + + # RFC 5246 says that clients MUST NOT offer the signature algorithms + # extension if they are offering TLS versions prior to 1.2. + if [[ "0x$tls_low_byte" -ge 0x03 ]] && [[ ! "$extra_extensions_list" =~ " 000d " ]]; then + [[ -n "$all_extensions" ]] && all_extensions+="," + all_extensions+="$extension_signature_algorithms" + fi + + if [[ -n "$extension_supported_groups" ]] && [[ ! "$extra_extensions_list" =~ " 000a " ]]; then + [[ -n "$all_extensions" ]] && all_extensions+="," + all_extensions+="$extension_supported_groups" + fi + + if [[ -n "$extensions_key_share" ]] && [[ ! "$extra_extensions_list" =~ " 00$KEY_SHARE_EXTN_NR " ]]; then + [[ -n "$all_extensions" ]] && all_extensions+="," + all_extensions+="$extensions_key_share" + fi + + if [[ -n "$extension_supported_point_formats" ]] && [[ ! 
"$extra_extensions_list" =~ " 000b " ]]; then + [[ -n "$all_extensions" ]] && all_extensions+="," + all_extensions+="$extension_supported_point_formats" + fi + + if [[ -n "$extra_extensions" ]]; then + [[ -n "$all_extensions" ]] && all_extensions+="," + all_extensions+="$extra_extensions" + fi + + # Make sure that a non-empty extension goes last (either heartbeat or padding). + # See PR #792 and https://www.ietf.org/mail-archive/web/tls/current/msg19720.html. + if [[ ! "$extra_extensions_list" =~ " 000f " ]]; then + [[ -n "$all_extensions" ]] && all_extensions+="," + all_extensions+="$extension_heartbeat" + fi + + code2network "$all_extensions" # convert extensions + all_extensions="$NW_STR" # we don't have the leading \x here so string length is two byte less, see next + len_extension=${#all_extensions} + len_extension+=2 + len_extension=$len_extension/4 + len_extension_hex=$(printf "%02x\n" $len_extension) + + # If the length of the Client Hello would be between 256 and 511 bytes, + # then add a padding extension (see RFC 7685) + len_all=$((0x$len_ciph_suites + 0x2b + 0x$len_extension_hex + 0x2)) + "$offer_compression" && len_all+=2 + [[ 0x$tls_low_byte -gt 0x03 ]] && len_all+=32 # TLSv1.3 ClientHello includes a 32-byte session id + if [[ $len_all -ge 256 ]] && [[ $len_all -le 511 ]] && [[ ! "$extra_extensions_list" =~ " 0015 " ]]; then + if [[ $len_all -ge 508 ]]; then + len_padding_extension=1 # Final extension cannot be empty: see PR #792 + else + len_padding_extension=$((508 - len_all)) + fi + len_padding_extension_hex=$(printf "%02x\n" $len_padding_extension) + len2twobytes "$len_padding_extension_hex" + all_extensions="$all_extensions\\x00\\x15\\x${LEN_STR:0:2}\\x${LEN_STR:4:2}" + for (( i=0; i> "$SOCK_REPLY_FILE" + rm "$sock_reply_file3" + fi + fi + fi + skip=false + if [[ $hello_done -eq 1 ]]; then + decrypted_response="$(check_tls_serverhellodone "$tls_hello_ascii" "$process_full" "$cipher" "$key_and_iv")" + hello_done=$? + [[ "$hello_done" -eq 0 ]] && [[ -n "$decrypted_response" ]] && tls_hello_ascii="$(toupper "$decrypted_response")" + if [[ "$hello_done" -eq 3 ]]; then + hello_done=1; skip=true + debugme echo "reading server hello..." + parse_tls_serverhello "$tls_hello_ascii" "ephemeralkey" + ret=$? + if [[ "$ret" -eq 0 ]] || [[ "$ret" -eq 2 ]]; then + cipher=$(get_cipher "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt") + if [[ -n "$hrr" ]]; then + key_and_iv="$(derive-handshake-traffic-keys "$cipher" "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" "$clienthello1" "$hrr" "$TLS_CLIENT_HELLO" "$TLS_SERVER_HELLO")" + else + key_and_iv="$(derive-handshake-traffic-keys "$cipher" "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" "" "" "$TLS_CLIENT_HELLO" "$TLS_SERVER_HELLO")" + fi + [[ $? -ne 0 ]] && hello_done=2 + else + hello_done=2 + fi + fi + fi + done + + debugme echo "reading server hello..." + if [[ "$DEBUG" -ge 4 ]]; then + hexdump -C $SOCK_REPLY_FILE | head -6 + echo + fi + + parse_tls_serverhello "$tls_hello_ascii" "$process_full" "$cipher_list_2send" + save=$? 
+ if "$close_connection" && [[ $save == 0 ]]; then + send_close_notify "$DETECTED_TLS_VERSION" + fi + + if [[ $DEBUG -ge 2 ]]; then + # see https://secure.wand.net.nz/trac/libprotoident/wiki/SSL + lines=$(count_lines "$(hexdump -C "$SOCK_REPLY_FILE" 2>$ERRFILE)") + tm_out " ($lines lines returned) " + fi + + # determine the return value for higher level, so that they can tell what the result is + if [[ $save -eq 1 ]] || [[ $lines -eq 1 ]]; then + ret=1 # NOT available + elif [[ $save -eq 3 ]]; then + # only for IMAP currently 'a002 NO Starttls' + ret=3 + elif [[ $save -eq 8 ]]; then + # odd return, we just pass this from parse_tls_serverhello() back + ret=8 + elif [[ $save -eq 4 ]]; then + # STARTTLS problem passing back + ret=4 + else + if [[ 03$tls_low_byte -eq $DETECTED_TLS_VERSION ]]; then + ret=0 # protocol available, TLS version returned equal to the one send + else + debugme echo -n "protocol send: 0x03$tls_low_byte, returned: 0x$DETECTED_TLS_VERSION" + ret=2 # protocol NOT available, server downgraded to $DETECTED_TLS_VERSION + fi + fi + debugme echo + else + debugme echo "stuck on sending: $ret" + fi + + "$close_connection" && close_socket + tmpfile_handle ${FUNCNAME[0]}.dd $SOCK_REPLY_FILE + return $ret +} + + +####### Vulnerabilities follow ####### +# General overview which browser "supports" which vulnerability: +# https://en.wikipedia.org/wiki/Transport_Layer_Security-SSL#Web_browsers + +# mainly adapted from https://gist.github.com/takeshixx/10107280 +# +run_heartbleed(){ + local tls_hexcode + local heartbleed_payload + local -i n lines_returned + local append="" + local tls_hello_ascii="" + local jsonID="heartbleed" + local cve="CVE-2014-0160" + local cwe="CWE-119" + local hint="" + + [[ $VULN_COUNT -le $VULN_THRESHLD ]] && outln && pr_headlineln " Testing for heartbleed vulnerability " && outln + pr_bold " Heartbleed"; out " ($cve) " + + if ( [[ "$STARTTLS_PROTOCOL" =~ ldap ]] || [[ "$STARTTLS_PROTOCOL" =~ irc ]] ); then + prln_local_problem "STARTTLS/$STARTTLS_PROTOCOL and --ssl-native collide here" + return 1 + fi + + [[ -z "$TLS_EXTENSIONS" ]] && determine_tls_extensions + if [[ ! 
"${TLS_EXTENSIONS}" =~ heartbeat ]]; then + pr_svrty_best "not vulnerable (OK)" + outln ", no heartbeat extension" + fileout "$jsonID" "OK" "not vulnerable, no heartbeat extension" "$cve" "$cwe" + return 0 + fi + + if [[ 0 -eq $(has_server_protocol tls1) ]]; then + tls_hexcode="x03, x01" + elif [[ 0 -eq $(has_server_protocol tls1_1) ]]; then + tls_hexcode="x03, x02" + elif [[ 0 -eq $(has_server_protocol tls1_2) ]]; then + tls_hexcode="x03, x03" + elif [[ 0 -eq $(has_server_protocol ssl3) ]]; then + tls_hexcode="x03, x00" + else # no protocol for some reason defined, determine TLS versions offered with a new handshake + $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY") >$TMPFILE 2>$ERRFILE =SSLv3): 18030x in case of a heartBEAT reply -- which we take as a positive result + tls_hello_ascii=$(hexdump -v -e '16/1 "%02X"' "$SOCK_REPLY_FILE") + debugme echo "tls_content_type: ${tls_hello_ascii:0:2}" + debugme echo "tls_protocol: ${tls_hello_ascii:2:4}" + + lines_returned=$(count_lines "$(hexdump -ve '16/1 "%02x " " \n"' "$SOCK_REPLY_FILE")") + debugme echo "lines HB reply: $lines_returned" + + if [[ $DEBUG -ge 3 ]]; then + tmln_out "\nheartbleed reply: " + hexdump -C "$SOCK_REPLY_FILE" | head -20 + [[ $lines_returned -gt 20 ]] && tmln_out "[...]" + tmln_out + fi + + if [[ $lines_returned -gt 1 ]] && [[ "${tls_hello_ascii:0:4}" == 1803 ]]; then + if [[ "$STARTTLS_PROTOCOL" =~ ftp ]]; then + # check possibility of weird vsftpd reply, see #426, despite "1803" seems very unlikely... + if grep -q '500 OOPS' "$SOCK_REPLY_FILE" ; then + append=", successful weeded out vsftpd false positive" + pr_svrty_best "not vulnerable (OK)"; out "$append" + fileout "$jsonID" "OK" "not vulnerable $append" "$cve" "$cwe" + else + out "likely " + pr_svrty_critical "VULNERABLE (NOT ok)" + [[ $DEBUG -lt 3 ]] && tm_out ", use debug >=3 to confirm" + fileout "$jsonID" "CRITICAL" "VULNERABLE $cve" "$cwe" "$hint" + fi + else + pr_svrty_critical "VULNERABLE (NOT ok)" + fileout "$jsonID" "CRITICAL" "VULNERABLE $cve" "$cwe" "$hint" + fi + else + pr_svrty_best "not vulnerable (OK)" + fileout "$jsonID" "OK" "not vulnerable $cve" "$cwe" + fi + fi + outln + tmpfile_handle ${FUNCNAME[0]}.dd $SOCK_REPLY_FILE + close_socket + return 0 +} + +# helper function +ok_ids(){ + prln_svrty_best "\n ok -- something reset our ccs packets" + return 0 +} + +# see https://www.openssl.org/news/secadv_20140605.txt +# mainly adapted from Ramon de C Valle's C code from https://gist.github.com/rcvalle/71f4b027d61a78c42607 +#FIXME: At a certain point ccs needs to be changed and make use of code2network using a file, then tls_sockets +# +run_ccs_injection(){ + local tls_hexcode ccs_message client_hello byte6 + local -i retval ret=0 + local tls_hello_ascii="" + local jsonID="CCS" + local cve="CVE-2014-0224" + local cwe="CWE-310" + local hint="" + + [[ $VULN_COUNT -le $VULN_THRESHLD ]] && outln && pr_headlineln " Testing for CCS injection vulnerability " && outln + pr_bold " CCS"; out " ($cve) " + + if ( [[ "$STARTTLS_PROTOCOL" =~ ldap ]] || [[ "$STARTTLS_PROTOCOL" =~ irc ]] ); then + prln_local_problem "STARTTLS/$STARTTLS_PROTOCOL and --ssl-native collide here" + return 1 + fi + + if [[ 0 -eq $(has_server_protocol tls1) ]]; then + tls_hexcode="x03, x01" + elif [[ 0 -eq $(has_server_protocol tls1_1) ]]; then + tls_hexcode="x03, x02" + elif [[ 0 -eq $(has_server_protocol tls1_2) ]]; then + tls_hexcode="x03, x03" + elif [[ 0 -eq $(has_server_protocol ssl3) ]]; then + tls_hexcode="x03, x00" + else # no protocol for some 
reason defined, determine TLS versions offered with a new handshake + $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY") >$TMPFILE 2>$ERRFILE RST +# +# 0A: Unexpected message +# 28: Handshake failure + if [[ -z "${tls_hello_ascii:0:12}" ]]; then + # empty reply + pr_svrty_best "not vulnerable (OK)" + if [[ $retval -eq 3 ]]; then + fileout "$jsonID" "OK" "not vulnerable (timed out)" "$cve" "$cwe" + else + fileout "$jsonID" "OK" "not vulnerable" "$cve" "$cwe" + fi + elif [[ "${tls_hello_ascii:0:4}" == "1503" ]]; then + if [[ ! "${tls_hello_ascii:5:2}" =~ [03|02|01|00] ]]; then + pr_warning "test failed " + out "no proper TLS repy (debug info: protocol sent: 1503${tls_hexcode#x03, x}, reply: ${tls_hello_ascii:0:14}" + fileout "$jsonID" "DEBUG" "test failed, around line $LINENO, debug info (${tls_hello_ascii:0:14})" "$cve" "$cwe" "$hint" + ret=1 + elif [[ "$byte6" == "15" ]]; then + # decryption failed received + pr_svrty_critical "VULNERABLE (NOT ok)" + fileout "$jsonID" "CRITICAL" "VULNERABLE" "$cve" "$cwe" "$hint" + elif [[ "$byte6" == "0A" ]] || [[ "$byte6" == "28" ]]; then + # Unexpected message / Handshake failure received + pr_warning "likely " + out "not vulnerable (OK)" + out " - alert description type: $byte6" + fileout "$jsonID" "WARN" "probably not vulnerable but received 0x${byte6} instead of 0x15" "$cve" "$cwe" "$hint" + elif [[ "$byte6" == "14" ]]; then + # bad_record_mac -- this is not "not vulnerable" + out "likely " + pr_svrty_critical "VULNERABLE (NOT ok)" + out ", suspicious \"bad_record_mac\" ($byte6)" + fileout "$jsonID" "CRITICAL" "likely VULNERABLE" "$cve" "$cwe" "$hint" + else + # other errors, see https://tools.ietf.org/html/rfc5246#section-7.2 + out "likely " + pr_svrty_critical "VULNERABLE (NOT ok)" + out ", suspicious error code \"$byte6\" returned. Please report" + fileout "$jsonID" "CRITICAL" "likely VULNERABLE with $byte6" "$cve" "$cwe" "$hint" + fi + elif [[ $STARTTLS_PROTOCOL == "mysql" ]] && [[ "${tls_hello_ascii:14:12}" == "233038533031" ]]; then + # MySQL community edition (yaSSL) returns a MySQL error instead of a TLS Alert + # Error: #08S01 Bad handshake + pr_svrty_best "not vulnerable (OK)" + out ", looks like MySQL community edition (yaSSL)" + fileout "$jsonID" "OK" "not vulnerable (MySQL community edition (yaSSL) detected)" "$cve" "$cwe" + elif [[ "$byte6" == [0-9a-f][0-9a-f] ]] && [[ "${tls_hello_ascii:2:2}" != "03" ]]; then + pr_warning "test failed" + out ", probably read buffer too small (${tls_hello_ascii:0:14})" + fileout "$jsonID" "DEBUG" "test failed, probably read buffer too small (${tls_hello_ascii:0:14})" "$cve" "$cwe" "$hint" + ret=1 + else + pr_warning "test failed " + out "around line $LINENO (debug info: ${tls_hello_ascii:0:12},$byte6)" + fileout "$jsonID" "DEBUG" "test failed, around line $LINENO, debug info (${tls_hello_ascii:0:12},$byte6)" "$cve" "$cwe" "$hint" + ret=1 + fi + outln + + tmpfile_handle ${FUNCNAME[0]}.dd $SOCK_REPLY_FILE + close_socket + return $ret +} + +sub_session_ticket_tls() { + local tls_proto="$1" + local sessticket_tls="" + #FIXME: we likely have done this already before (either @ run_server_defaults() or at least the output + # from a previous handshake) --> would save 1x connect. We have TLS_TICKET but not yet the ticket itself #FIXME + #ATTENTION: we DO NOT use SNI here as we assume ticketbleed is a vulnerability of the TLS stack. 
If we'd do SNI here, we'd also need + # it in the ClientHello of run_ticketbleed() otherwise the ticket will be different and the whole thing won't work! + # + sessticket_tls="$($OPENSSL s_client $(s_client_options "$BUGS $tls_proto $PROXY -connect $NODEIP:$PORT") $ERRFILE | awk '/TLS session ticket:/,/^$/' | awk '!/TLS session ticket/')" + sessticket_tls="$(sed -e 's/^.* - /x/g' -e 's/ .*$//g' <<< "$sessticket_tls" | tr '\n' ',')" + sed -e 's/ /,x/g' -e 's/-/,x/g' <<< "$sessticket_tls" + +} + + +# see https://blog.filippo.io/finding-ticketbleed/ | https://ticketbleed.com/ +run_ticketbleed() { + local tls_hexcode tls_proto="" + local session_tckt_tls="" + local -i len_ch=300 # fixed len of prepared clienthello below + local sid="x00,x0B,xAD,xC0,xDE,x00," # some abitratry bytes + local len_sid="$(( ${#sid} / 4))" + local xlen_sid="$(dec02hex $len_sid)" + local -i len_tckt_tls=0 nr_sid_detected=0 + local xlen_tckt_tls="" xlen_handshake_record_layer="" xlen_handshake_ssl_layer="" + local -i len_handshake_record_layer=0 + local i + local -a memory sid_detected + local early_exit=true + local -i ret=0 + local jsonID="ticketbleed" + local cve="CVE-2016-9244" + local cwe="CWE-200" + local hint="" + + [[ -n "$STARTTLS" ]] && return 0 + [[ $VULN_COUNT -le $VULN_THRESHLD ]] && outln && pr_headlineln " Testing for Ticketbleed vulnerability " && outln + pr_bold " Ticketbleed"; out " ($cve), experiment. " + + if [[ "$SERVICE" != HTTP ]] && ! "$CLIENT_AUTH"; then + outln "-- (applicable only for HTTPS)" + fileout "$jsonID" "INFO" "not applicable, not HTTP" "$cve" "$cwe" + return 0 + fi + + # highly unlikely that it is NOT supported. We may loose time here but it's more solid + [[ -z "$TLS_EXTENSIONS" ]] && determine_tls_extensions + if [[ ! "${TLS_EXTENSIONS}" =~ "session ticket" ]]; then + pr_svrty_best "not vulnerable (OK)" + outln ", no session ticket extension" + fileout "$jsonID" "OK" "no session ticket extension" "$cve" "$cwe" + return 0 + fi + + if [[ 0 -eq $(has_server_protocol tls1) ]]; then + tls_hexcode="x03, x01"; tls_proto="-tls1" + elif [[ 0 -eq $(has_server_protocol tls1_1) ]]; then + tls_hexcode="x03, x02"; tls_proto="-tls1_1" + elif [[ 0 -eq $(has_server_protocol tls1_2) ]]; then + tls_hexcode="x03, x03"; tls_proto="-tls1_2" + elif [[ 0 -eq $(has_server_protocol ssl3) ]]; then + tls_hexcode="x03, x00"; tls_proto="-ssl3" + else # no protocol for some reason defined, determine TLS versions offered with a new handshake + "$HAS_TLS13" && tls_proto="-no_tls1_3" + $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS $tls_proto -connect $NODEIP:$PORT $PROXY") >$TMPFILE 2>$ERRFILE $TEMPDIR/${FUNCNAME[0]}.tls_hello_ascii${i}.txt + else + ret=1 + pr_warning "test failed" + out " around line $LINENO (debug info: ${tls_hello_ascii:0:2}, ${tls_hello_ascii:2:10})" + fileout "$jsonID" "DEBUG" "test failed, around $LINENO (debug info: ${tls_hello_ascii:0:2}, ${tls_hello_ascii:2:10})" "$cve" "$cwe" + send_close_notify "${tls_hello_ascii:18:4}" + close_socket + break + fi + send_close_notify "${tls_hello_ascii:18:4}" + close_socket + done + + if ! "$early_exit"; then + # here we test the replies if a TLS server hello was received >1x + for i in 1 2 3 ; do + if [[ "${sid_detected[i]}" =~ $sid_input ]]; then + # was our faked TLS SID returned? 
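 # (if so, count it; the potential memory fragments are only compared further below when all three handshakes echoed it)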
+ nr_sid_detected+=1 + fi + done + if [[ $nr_sid_detected -eq 3 ]]; then + if [[ ${memory[1]} != ${memory[2]} ]] && [[ ${memory[2]} != ${memory[3]} ]]; then + pr_svrty_critical "VULNERABLE (NOT ok)" + fileout "$jsonID" "CRITICAL" "VULNERABLE" "$cve" "$cwe" "$hint" + else + pr_svrty_best "not vulnerable (OK)" + out ", session IDs were returned but potential memory fragments do not differ" + fileout "$jsonID" "OK" "not vulnerable, returned potential memory fragments do not differ" "$cve" "$cwe" + fi + else + if [[ "$DEBUG" -ge 2 ]]; then + echo + pr_warning "test failed, non reproducible results!" + else + pr_warning "test failed, non reproducible results!" + out " Please run again w \"--debug=2\" (# of faked TLS SIDs detected: $nr_sid_detected)" + fi + fileout "$jsonID" "DEBUG" "test failed, non reproducible results. $nr_sid_detected TLS Session IDs $nr_sid_detected, ${sid_detected[1]},${sid_detected[2]},${sid_detected[3]}" "$cve" "$cwe" + ret=1 + fi + fi + outln + return $ret +} + +# Overview @ https://www.exploresecurity.com/wp-content/uploads/custom/SSL_manual_cheatsheet.html +# +run_renego() { + local legacycmd="" proto="$OPTIMAL_PROTO" + local sec_renego sec_client_renego + local -i ret=0 + local cve="" + local cwe="CWE-310" + local hint="" + local jsonID="" + # No SNI needed here as there won't be two different SSL stacks for one IP + + "$HAS_TLS13" && [[ -z "$proto" ]] && proto="-no_tls1_3" + + [[ $VULN_COUNT -le $VULN_THRESHLD ]] && outln && pr_headlineln " Testing for Renegotiation vulnerabilities " && outln + + pr_bold " Secure Renegotiation (RFC 5746) " + jsonID="secure_renego" + + if "$TLS13_ONLY"; then + # https://www.openssl.org/blog/blog/2018/02/08/tlsv1.3/ + pr_svrty_best "not vulnerable (OK)" + [[ $DEBUG -ge 1 ]] && out ", no renegotiation support in TLS 1.3 only servers" + outln + fileout "$jsonID" "OK" "TLS 1.3 only server" "$cve" "$cwe" + else + # first fingerprint for the Line "Secure Renegotiation IS NOT" or "Secure Renegotiation IS " + $OPENSSL s_client $(s_client_options "$proto $STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY") 2>&1 $TMPFILE 2>$ERRFILE + if sclient_connect_successful $? $TMPFILE; then + grep -iaq "Secure Renegotiation IS NOT" $TMPFILE + sec_renego=$? # 0= Secure Renegotiation IS NOT supported + # grep -iaq "Secure Renegotiation IS supported" + #FIXME: didn't occur to me yet but why not also to check on "Secure Renegotiation IS supported" + case $sec_renego in + 0) prln_svrty_critical "Not supported / VULNERABLE (NOT ok)" + fileout "$jsonID" "CRITICAL" "VULNERABLE" "$cve" "$cwe" "$hint" + ;; + 1) prln_svrty_best "supported (OK)" + fileout "$jsonID" "OK" "supported" "$cve" "$cwe" + ;; + *) prln_warning "FIXME (bug): $sec_renego" + fileout "$jsonID" "WARN" "FIXME (bug) $sec_renego" "$cve" "$cwe" + ;; + esac + else + prln_warning "OpenSSL handshake didn't succeed" + fileout "$jsonID" "WARN" "OpenSSL handshake didn't succeed" "$cve" "$cwe" + fi + fi + + # FIXME: Basically this can be done with sockets and we might have that information already + # see https://tools.ietf.org/html/rfc5746#section-3.4: 'The client MUST include either an empty "renegotiation_info" + # extension, or the TLS_EMPTY_RENEGOTIATION_INFO_SCSV signaling cipher suite value in the ClientHello. [..] 
+ # When a ServerHello is received, the client MUST check if it includes the "renegotiation_info" extension: + # If the extension is not present, the server does not support secure renegotiation' + + + pr_bold " Secure Client-Initiated Renegotiation " + jsonID="secure_client_renego" + cve="CVE-2011-1473" + # see: https://blog.qualys.com/ssllabs/2011/10/31/tls-renegotiation-and-denial-of-service-attacks + # https://blog.ivanristic.com/2009/12/testing-for-ssl-renegotiation.html -- head/get doesn't seem to be needed though + # https://archive.fo/20130415224936/http://www.thc.org/thc-ssl-dos/ + # https://vincent.bernat.ch/en/blog/2011-ssl-dos-mitigation + case "$OSSL_VER" in + 0.9.8*) # we need this for Mac OSX unfortunately + case "$OSSL_VER_APPENDIX" in + [a-l]) + prln_local_problem " Your $OPENSSL cannot test this secure renegotiation vulnerability" + fileout "$jsonID" "WARN" "your $OPENSSL cannot test this secure renegotiation vulnerability" "$cve" "$cwe" + return 1 + ;; + [m-z]) + ;; # all ok + esac + ;; + 1.0.1*|1.0.2*) + legacycmd="-legacy_renegotiation" + ;; + 0.9.9*|1.0*|1.1*) + ;; # all ok + esac + + + if "$TLS13_ONLY"; then + pr_svrty_best "not vulnerable (OK)" + [[ $DEBUG -ge 1 ]] && out ", no renegotiation support in TLS 1.3 only servers" + outln + fileout "$jsonID" "OK" "not vulnerable, TLS 1.3 only" "$cve" "$cwe" + elif "$CLIENT_AUTH"; then + prln_warning "client x509-based authentication prevents this from being tested" + fileout "$jsonID" "WARN" "client x509-based authentication prevents this from being tested" + sec_client_renego=1 + else + # We need up to two tries here, as some LiteSpeed servers don't answer on "R" and block. Thus first try in the background + # msg enables us to look deeper into it while debugging + echo R | $OPENSSL s_client $(s_client_options "$proto $BUGS $legacycmd $STARTTLS -connect $NODEIP:$PORT $PROXY") >$TMPFILE 2>>$ERRFILE & + wait_kill $! $HEADER_MAXSLEEP + if [[ $? -eq 3 ]]; then + pr_svrty_good "likely not vulnerable (OK)"; outln ", timed out" # it hung + fileout "$jsonID" "OK" "likely not vulnerable (timed out)" "$cve" "$cwe" + sec_client_renego=1 + else + # second try in the foreground as we are sure now it won't hang + echo R | $OPENSSL s_client $(s_client_options "$proto $legacycmd $STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY") >$TMPFILE 2>>$ERRFILE + sec_client_renego=$? # 0=client is renegotiating & doesn't return an error --> vuln! + case "$sec_client_renego" in + 0) # We try again if server is HTTP. This could be either a node.js server or something else. + # node.js has a mitigation which allows 3x R and then blocks. So we test 4x + # This way we save a couple seconds as we weeded out the ones which are more robust + if [[ $SERVICE != HTTP ]]; then + pr_svrty_medium "VULNERABLE (NOT ok)"; outln ", potential DoS threat" + fileout "$jsonID" "MEDIUM" "VULNERABLE, potential DoS threat" "$cve" "$cwe" "$hint" + else + (for i in {1..4}; do echo R; sleep 1; done) | \ + $OPENSSL s_client $(s_client_options "$proto $legacycmd $STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY") >$TMPFILE 2>>$ERRFILE + case $? 
in + 0) pr_svrty_high "VULNERABLE (NOT ok)"; outln ", DoS threat" + fileout "$jsonID" "HIGH" "VULNERABLE, DoS threat" "$cve" "$cwe" "$hint" + ;; + 1) pr_svrty_good "not vulnerable (OK)" + outln " -- mitigated" + fileout "$jsonID" "OK" "not vulnerable, mitigated" "$cve" "$cwe" + ;; + *) prln_warning "FIXME (bug): $sec_client_renego (4 tries)" + fileout "$jsonID" "DEBUG" "FIXME (bug 4 tries) $sec_client_renego" "$cve" "$cwe" + ret=1 + ;; + esac + fi + ;; + 1) + prln_svrty_good "not vulnerable (OK)" + fileout "$jsonID" "OK" "not vulnerable" "$cve" "$cwe" + ;; + *) + prln_warning "FIXME (bug): $sec_client_renego" + fileout "$jsonID" "DEBUG" "FIXME (bug) $sec_client_renego - Please report" "$cve" "$cwe" + ret=1 + ;; + esac + fi + fi + + #pr_bold " Insecure Client-Initiated Renegotiation " # pre-RFC 5746, CVE-2009-3555 + #jsonID="insecure_client_renego" + # + # https://www.openssl.org/news/vulnerabilities.html#y2009. It can only be tested with OpenSSL <=0.9.8k + # Insecure Client-Initiated Renegotiation is missing ==> sockets. When we complete the handshake ;-) + + tmpfile_handle ${FUNCNAME[0]}.txt + return $ret +} + +run_crime() { + local -i ret=0 sclient_success + local addcmd="" + local cve="CVE-2012-4929" + local cwe="CWE-310" + local hint="" + + # In a nutshell: don't offer TLS/SPDY compression. This tests for CRIME Vulnerability on HTTPS only, + # not SPDY or ALPN (yet). Please note that it is an attack where you need client side control, so in + # regular situations this # means anyway "game over", with or without CRIME. + # + # https://blog.qualys.com/ssllabs/2012/09/14/crime-information-leakage-attack-against-ssltls + + [[ $VULN_COUNT -le $VULN_THRESHLD ]] && outln && pr_headlineln " Testing for CRIME vulnerability " && outln + pr_bold " CRIME, TLS " ; out "($cve) " + + if "$TLS13_ONLY"; then + pr_svrty_best "not vulnerable (OK)" + [[ $DEBUG -ge 1 ]] && out ", no compression in TLS 1.3 only servers" + outln + fileout "$jsonID" "OK" "TLS 1.3 only server" "$cve" "$cwe" + return 0 + fi + + if ! "$HAS_ZLIB"; then + if "$SSL_NATIVE"; then + prln_local_problem "$OPENSSL lacks zlib support" + fileout "CRIME_TLS" "WARN" "CRIME, TLS: Not tested. $OPENSSL lacks zlib support" "$cve" "$cwe" + return 1 + else + tls_sockets "03" "$TLS12_CIPHER" "" "" "true" + sclient_success=$? + [[ $sclient_success -eq 2 ]] && sclient_success=0 + [[ $sclient_success -eq 0 ]] && cp "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" $TMPFILE + fi + else + [[ "$OSSL_VER" == 0.9.8* ]] && addcmd="-no_ssl2" + "$HAS_TLS13" && [[ -z "$OPTIMAL_PROTO" ]] && addcmd+=" -no_tls1_3" + $OPENSSL s_client $(s_client_options "$OPTIMAL_PROTO $BUGS -comp $addcmd $STARTTLS -connect $NODEIP:$PORT $PROXY $SNI") $TMPFILE + sclient_connect_successful $? $TMPFILE + sclient_success=$? + fi + + if [[ $sclient_success -ne 0 ]]; then + pr_warning "test failed (couldn't connect)" + fileout "CRIME_TLS" "WARN" "Check failed, couldn't connect" "$cve" "$cwe" + ret=1 + elif grep -a Compression $TMPFILE | grep -aq NONE >/dev/null; then + pr_svrty_good "not vulnerable (OK)" + if [[ $SERVICE != HTTP ]] && ! 
"$CLIENT_AUTH"; then + out " (not using HTTP anyway)" + fileout "CRIME_TLS" "OK" "not vulnerable (not using HTTP anyway)" "$cve" "$cwe" + else + fileout "CRIME_TLS" "OK" "not vulnerable" "$cve" "$cwe" + fi + else + if [[ $SERVICE == HTTP ]] || "$CLIENT_AUTH"; then + pr_svrty_high "VULNERABLE (NOT ok)" + fileout "CRIME_TLS" "HIGH" "VULNERABLE" "$cve" "$cwe" "$hint" + else + pr_svrty_medium "VULNERABLE but not using HTTP: probably no exploit known" + fileout "CRIME_TLS" "MEDIUM" "VULNERABLE, but not using HTTP. Probably no exploit known" "$cve" "$cwe" "$hint" + # not clear whether a protocol != HTTP offers the ability to repeatedly modify the input + # which is done e.g. via javascript in the context of HTTP + fi + fi + outln + +# this needs to be re-done i order to remove the redundant check for spdy + + # weed out starttls, spdy-crime is a web thingy +# if [[ "x$STARTTLS" != "x" ]]; then +# echo +# return $ret +# fi + + # weed out non-webports, spdy-crime is a web thingy. there's a catch thoug, you see it? +# case $PORT in +# 25|465|587|80|110|143|993|995|21) +# echo +# return $ret +# esac + +# if "$HAS_NPN"; then +# $OPENSSL s_client -host $NODE -port $PORT -nextprotoneg $NPN_PROTOs $SNI /dev/null >$TMPFILE +# if [[ $? -eq 0 ]]; then +# echo +# pr_bold "CRIME Vulnerability, SPDY " ; outln "($cve): " + +# STR=$(grep Compression $TMPFILE ) +# if echo $STR | grep -q NONE >/dev/null; then +# pr_svrty_best "not vulnerable (OK)" +# ret=$((ret + 0)) +# else +# pr_svrty_critical "VULNERABLE (NOT ok)" +# ret=$((ret + 1)) +# fi +# fi +# fi +# [[ $DEBUG -ge 2 ]] tmln_out "$STR" + tmpfile_handle ${FUNCNAME[0]}.txt + return $ret +} + + +# BREACH is a HTTP-level compression & an attack which works against any cipher suite and is agnostic +# to the version of TLS/SSL, more: http://www.breachattack.com/ . Foreign referrers are the important thing here! +# Mitigation: see https://community.qualys.com/message/20360 +# +run_breach() { + local header + local -i ret=0 + local -i was_killed=0 + local referer useragent + local url="$1" + local spaces=" " + local disclaimer="" + local when_makesense=" Can be ignored for static pages or if no secrets in the page" + local cve="CVE-2013-3587" + local cwe="CWE-310" + local hint="" + local jsonID="BREACH" + + [[ $SERVICE != HTTP ]] && ! "$CLIENT_AUTH" && return 7 + + [[ $VULN_COUNT -le $VULN_THRESHLD ]] && outln && pr_headlineln " Testing for BREACH (HTTP compression) vulnerability " && outln + pr_bold " BREACH"; out " ($cve) " + if "$CLIENT_AUTH"; then + outln "cannot be tested (server side requires x509 authentication)" + fileout "$jsonID" "INFO" "was not tested, server side requires x509 authentication" "$cve" "$cwe" + fi + + # if [[ $NR_HEADER_FAIL -ge $MAX_HEADER_FAIL ]]; then + # pr_warning "Retrieving HTTP header failed before. Skipping." + # fileout "$jsonID" "WARN" "HTTP response was wampty before" "$cve" "$cwe" + # outln + # return 1 + # fi + + [[ -z "$url" ]] && url="/" + disclaimer=" - only supplied \"$url\" tested" + + referer="https://google.com/" + [[ "$NODE" =~ google ]] && referer="https://yandex.ru/" # otherwise we have a false positive for google.com + useragent="$UA_STD" + $SNEAKY && useragent="$UA_SNEAKY" + printf "GET $url HTTP/1.1\r\nHost: $NODE\r\nUser-Agent: $useragent\r\nReferer: $referer\r\nConnection: Close\r\nAccept-encoding: gzip,deflate,compress,br\r\nAccept: text/*\r\n\r\n" | $OPENSSL s_client $(s_client_options "$OPTIMAL_PROTO $BUGS -quiet -ign_eof -connect $NODEIP:$PORT $PROXY $SNI") 1>$TMPFILE 2>$ERRFILE & + wait_kill $! 
$HEADER_MAXSLEEP + was_killed=$? # !=0 was killed + result="$(grep -ia Content-Encoding: $TMPFILE)" + result="$(strip_lf "$result")" + result="${result#*:}" + result="$(strip_spaces "$result")" + debugme echo "$result" + if [[ ! -s $TMPFILE ]]; then + pr_warning "failed (HTTP header request stalled or empty return" + if [[ $was_killed -ne 0 ]]; then + pr_warning " and was terminated" + fileout "$jsonID" "WARN" "Test failed as HTTP request stalled and was terminated" "$cve" "$cwe" + else + fileout "$jsonID" "WARN" "Test failed as HTTP response was empty" "$cve" "$cwe" + fi + prln_warning ") " + ret=1 + elif [[ -z $result ]]; then + pr_svrty_best "no HTTP compression (OK) " + outln "$disclaimer" + fileout "$jsonID" "OK" "not vulnerable, no HTTP compression $disclaimer" "$cve" "$cwe" + else + pr_svrty_high "potentially NOT ok, \"$result\" HTTP compression detected." + outln "$disclaimer" + outln "$spaces$when_makesense" + fileout "$jsonID" "HIGH" "potentially VULNERABLE, $result HTTP compression detected $disclaimer" "$cve" "$cwe" "$hint" + fi + # Any URL can be vulnerable. I am testing now only the given URL! + + tmpfile_handle ${FUNCNAME[0]}.txt + return $ret +} + + +# SWEET32 (https://sweet32.info/). Birthday attacks on 64-bit block ciphers. +# In a nutshell: don't use 3DES ciphers anymore (DES, RC2 and IDEA too). +# Please note as opposed to RC4 (stream cipher) RC2 is a block cipher. +# +run_sweet32() { + local -i sclient_success=1 + local sweet32_ciphers="IDEA-CBC-SHA:IDEA-CBC-MD5:RC2-CBC-MD5:KRB5-IDEA-CBC-SHA:KRB5-IDEA-CBC-MD5:ECDHE-RSA-DES-CBC3-SHA:ECDHE-ECDSA-DES-CBC3-SHA:SRP-DSS-3DES-EDE-CBC-SHA:SRP-RSA-3DES-EDE-CBC-SHA:SRP-3DES-EDE-CBC-SHA:EDH-RSA-DES-CBC3-SHA:EDH-DSS-DES-CBC3-SHA:DH-RSA-DES-CBC3-SHA:DH-DSS-DES-CBC3-SHA:AECDH-DES-CBC3-SHA:ADH-DES-CBC3-SHA:ECDH-RSA-DES-CBC3-SHA:ECDH-ECDSA-DES-CBC3-SHA:DES-CBC3-SHA:DES-CBC3-MD5:DES-CBC3-SHA:RSA-PSK-3DES-EDE-CBC-SHA:PSK-3DES-EDE-CBC-SHA:KRB5-DES-CBC3-SHA:KRB5-DES-CBC3-MD5:ECDHE-PSK-3DES-EDE-CBC-SHA:DHE-PSK-3DES-EDE-CBC-SHA:DES-CFB-M1:EXP1024-DHE-DSS-DES-CBC-SHA:EDH-RSA-DES-CBC-SHA:EDH-DSS-DES-CBC-SHA:DH-RSA-DES-CBC-SHA:DH-DSS-DES-CBC-SHA:ADH-DES-CBC-SHA:EXP1024-DES-CBC-SHA:DES-CBC-SHA:EXP1024-RC2-CBC-MD5:DES-CBC-MD5:DES-CBC-SHA:KRB5-DES-CBC-SHA:KRB5-DES-CBC-MD5:EXP-EDH-RSA-DES-CBC-SHA:EXP-EDH-DSS-DES-CBC-SHA:EXP-ADH-DES-CBC-SHA:EXP-DES-CBC-SHA:EXP-RC2-CBC-MD5:EXP-RC2-CBC-MD5:EXP-KRB5-RC2-CBC-SHA:EXP-KRB5-DES-CBC-SHA:EXP-KRB5-RC2-CBC-MD5:EXP-KRB5-DES-CBC-MD5:EXP-DH-DSS-DES-CBC-SHA:EXP-DH-RSA-DES-CBC-SHA" + local sweet32_ciphers_hex="00,07, 00,21, 00,25, c0,12, c0,08, c0,1c, c0,1b, c0,1a, 00,16, 00,13, 00,10, 00,0d, c0,17, 00,1b, c0,0d, c0,03, 00,0a, 00,93, 00,8b, 00,1f, 00,23, c0,34, 00,8f, fe,ff, ff,e0, 00,63, 00,15, 00,12, 00,0f, 00,0c, 00,1a, 00,62, 00,09, 00,61, 00,1e, 00,22, fe,fe, ff,e1, 00,14, 00,11, 00,19, 00,08, 00,06, 00,27, 00,26, 00,2a, 00,29, 00,0b, 00,0e" + local ssl2_sweet32_ciphers='RC2-CBC-MD5:EXP-RC2-CBC-MD5:IDEA-CBC-MD5:DES-CBC-MD5:DES-CBC-SHA:DES-CBC3-MD5:DES-CBC3-SHA:DES-CFB-M1' + local ssl2_sweet32_ciphers_hex='03,00,80, 04,00,80, 05,00,80, 06,00,40, 06,01,40, 07,00,C0, 07,01,C0, FF,80,00' + local nr_cipher_minimal=21 + local proto + local cve="CVE-2016-2183 CVE-2016-6329" + local cwe="CWE-327" + local hint="" + local -i nr_sweet32_ciphers=0 nr_supported_ciphers=0 nr_ssl2_sweet32_ciphers=0 nr_ssl2_supported_ciphers=0 + local ssl2_sweet=false + local using_sockets=true + + [[ $VULN_COUNT -le $VULN_THRESHLD ]] && outln && pr_headlineln " Testing for SWEET32 (Birthday Attacks on 64-bit Block Ciphers) " && outln + 
pr_bold " SWEET32"; out " (${cve// /, }) " + + if "$TLS13_ONLY"; then + # Unfortunately there's no restriction using TLS 1.2 with $sweet32_ciphers + pr_svrty_best "not vulnerable (OK)" + [[ $DEBUG -ge 1 ]] && out ", TLS 1.3 doesn't offer such ciphers" + outln + fileout "$jsonID" "OK" "not vulnerable" "$cve" "$cwe" + return 0 + fi + + "$SSL_NATIVE" && using_sockets=false + # The openssl binary distributed has almost everything we need (PSK, KRB5 ciphers and feff, ffe0 are typically missing). + # Measurements show that there's little impact whether we use sockets or TLS here, so the default is sockets here. + if "$using_sockets"; then + for proto in 03 02 01 00; do + [[ $(has_server_protocol "$proto") -eq 1 ]] && continue + tls_sockets "$proto" "${sweet32_ciphers_hex}, 00,ff" + sclient_success=$? + [[ $sclient_success -eq 2 ]] && sclient_success=0 + [[ $sclient_success -eq 0 ]] && break + done + if [[ 1 -ne $(has_server_protocol "ssl2") ]]; then + sslv2_sockets "$ssl2_sweet32_ciphers_hex" + case $? in + 3) ssl2_sweet=true + add_tls_offered ssl2 yes ;; + 0) ;; # ssl2_sweet=false + 1|4|6|7) debugme "${FUNCNAME[0]}: test problem we don't handle here" + ;; + esac + fi + else + nr_sweet32_ciphers=$(count_ciphers $sweet32_ciphers) + nr_supported_ciphers=$(count_ciphers $(actually_supported_osslciphers $sweet32_ciphers)) + debugme echo "$nr_sweet32_ciphers / $nr_supported_ciphers" + + nr_ssl2_sweet32_ciphers=$(count_ciphers $ssl2_sweet32_ciphers) + nr_ssl2_supported_ciphers=$(count_ciphers $(actually_supported_osslciphers $ssl2_sweet32_ciphers)) + debugme echo "$nr_ssl2_sweet32_ciphers / $nr_ssl2_supported_ciphers" + + if [[ $(( nr_supported_ciphers + nr_ssl2_supported_ciphers )) -le $nr_cipher_minimal ]]; then + pr_local_problem "Only ${nr_supported_ciphers}+${nr_ssl2_supported_ciphers} \"SWEET32 ciphers\" found in your $OPENSSL." + outln " Test skipped" + fileout "SWEET32" "WARN" "Not tested, lack of local support ($((nr_supported_ciphers + nr_ssl2_supported_ciphers)) ciphers only)" "$cve" "$cwe" "$hint" + return 1 + fi + for proto in -no_ssl2 -tls1_1 -tls1 -ssl3; do + [[ $nr_supported_ciphers -eq 0 ]] && break + ! "$HAS_SSL3" && [[ "$proto" == -ssl3 ]] && continue + if [[ "$proto" != -no_ssl2 ]]; then + "$FAST" && break + [[ $(has_server_protocol "${proto:1}") -eq 1 ]] && continue + fi + $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS $proto -cipher $sweet32_ciphers -connect $NODEIP:$PORT $PROXY $SNI") >$TMPFILE 2>$ERRFILE $TMPFILE 2>$ERRFILE $TMPFILE 2>$ERRFILE $TMPFILE 2>$ERRFILE $TMPFILE 2>$ERRFILE $TMPFILE $TMPFILE 2>$ERRFILE $TMPFILE 2>$ERRFILE $TEMPDIR/dh_p.txt + if [[ ! 
-s "$common_primes_file" ]]; then + prln_local_problem "couldn't read common primes file $common_primes_file" + out "${spaces}" + fileout "$jsonID2" "WARN" "couldn't read common primes file $common_primes_file" + return 1 + else + dh_p="$(toupper "$dh_p")" + # In the previous line of the match is bascially the hint we want to echo + # the most elegant thing to get the previous line [ awk '/regex/ { print x }; { x=$0 }' ] doesn't work with gawk + lineno_matched=$(grep -n "$dh_p" "$common_primes_file" 2>/dev/null | awk -F':' '{ print $1 }') + if [[ "$lineno_matched" -ne 0 ]]; then + DH_GROUP_OFFERED="$(awk "NR == $lineno_matched-1" "$common_primes_file" | awk -F'"' '{ print $2 }')" + #subret=1 # vulnerable: common prime + else + DH_GROUP_OFFERED="Unknown DH group" + : + #subret=0 # not vulnerable: no known common prime + fi + return 0 + fi +} + + +# helper function for run_logjam see below +# +out_common_prime() { + local jsonID2="$1" + local cve="$2" + local cwe="$3" + + [[ "$DH_GROUP_OFFERED" == ffdhe* ]] && [[ ! "$DH_GROUP_OFFERED" =~ \ ]] && DH_GROUP_OFFERED="RFC7919/$DH_GROUP_OFFERED" + if [[ "$DH_GROUP_OFFERED" =~ ffdhe ]] && [[ "$DH_GROUP_OFFERED" =~ \ ]]; then + out "common primes detected: "; pr_italic "$DH_GROUP_OFFERED" + fileout "$jsonID2" "INFO" "$DH_GROUP_OFFERED" "$cve" "$cwe" + # Now (below) size matters -- i.e. the bit size. As this is about a known prime we label it more strict. + # This needs maybe needs another thought as it could appear inconsistent with run_pfs and elsewhere. + # for now we label the bit size similar in the screen, but distinguish the leading text for logjam before + elif [[ $DH_GROUP_LEN_P -le 800 ]]; then + pr_svrty_critical "VULNERABLE (NOT ok):"; out " common prime: " + fileout "$jsonID2" "CRITICAL" "$DH_GROUP_OFFERED" "$cve" "$cwe" + pr_dh "$DH_GROUP_OFFERED" $DH_GROUP_LEN_P + elif [[ $DH_GROUP_LEN_P -le 1024 ]]; then + # really? 
Here we assume that 1024-bit common prime for nation states are worth and possible to precompute (TBC) + # otherwise 1024 are just medium + pr_svrty_high "VULNERABLE (NOT ok):"; out " common prime: " + fileout "$jsonID2" "HIGH" "$DH_GROUP_OFFERED" "$cve" "$cwe" + pr_dh "$DH_GROUP_OFFERED" $DH_GROUP_LEN_P + elif [[ $DH_GROUP_LEN_P -le 1536 ]]; then + pr_svrty_low "common prime: " + fileout "$jsonID2" "LOW" "$DH_GROUP_OFFERED" "$cve" "$cwe" + pr_dh "$DH_GROUP_OFFERED" $DH_GROUP_LEN_P + else + out "common prime with $DH_GROUP_LEN_P bits detected: " + fileout "$jsonID2" "INFO" "$DH_GROUP_OFFERED" "$cve" "$cwe" + pr_dh "$DH_GROUP_OFFERED" $DH_GROUP_LEN_P + fi +} + + +# see https://weakdh.org/logjam.html +run_logjam() { + local -i sclient_success=0 + local exportdh_cipher_list="EXP1024-DHE-DSS-DES-CBC-SHA:EXP1024-DHE-DSS-RC4-SHA:EXP-EDH-RSA-DES-CBC-SHA:EXP-EDH-DSS-DES-CBC-SHA" + local exportdh_cipher_list_hex="00,63, 00,65, 00,14, 00,11" + local all_dh_ciphers="cc,15, 00,b3, 00,91, c0,97, 00,a3, 00,9f, cc,aa, c0,a3, c0,9f, 00,6b, 00,6a, 00,39, 00,38, 00,c4, 00,c3, 00,88, 00,87, 00,a7, 00,6d, 00,3a, 00,c5, 00,89, 00,ab, cc,ad, c0,a7, c0,43, c0,45, c0,47, c0,53, c0,57, c0,5b, c0,67, c0,6d, c0,7d, c0,81, c0,85, c0,91, 00,a2, 00,9e, c0,a2, c0,9e, 00,aa, c0,a6, 00,67, 00,40, 00,33, 00,32, 00,be, 00,bd, 00,9a, 00,99, 00,45, 00,44, 00,a6, 00,6c, 00,34, 00,bf, 00,9b, 00,46, 00,b2, 00,90, c0,96, c0,42, c0,44, c0,46, c0,52, c0,56, c0,5a, c0,66, c0,6c, c0,7c, c0,80, c0,84, c0,90, 00,66, 00,18, 00,8e, 00,16, 00,13, 00,1b, 00,8f, 00,63, 00,15, 00,12, 00,1a, 00,65, 00,14, 00,11, 00,19, 00,17, 00,b5, 00,b4, 00,2d" # 93 ciphers + local -i i nr_supported_ciphers=0 server_key_exchange_len=0 ephemeral_pub_len=0 + local addtl_warning="" hexc + local -i ret=0 subret=0 + local server_key_exchange key_bitstring="" + local spaces=" " + local vuln_exportdh_ciphers=false + local openssl_no_expdhciphers=false + local str="" + local using_sockets=true + local cve="CVE-2015-4000" + local cwe="CWE-310" + local hint="" + local jsonID="LOGJAM" + local jsonID2="${jsonID}-common_primes" + + [[ $VULN_COUNT -le $VULN_THRESHLD ]] && outln && pr_headlineln " Testing for LOGJAM vulnerability " && outln + pr_bold " LOGJAM"; out " ($cve), experimental " + + "$SSL_NATIVE" && using_sockets=false + # Also as the openssl binary distributed has everything we need measurements show that + # there's no impact whether we use sockets or TLS here, so the default is sockets here + if ! "$using_sockets"; then + nr_supported_ciphers=$(count_ciphers $(actually_supported_osslciphers $exportdh_cipher_list)) + debugme echo $nr_supported_ciphers + case $nr_supported_ciphers in + 0) prln_local_problem "$OPENSSL doesn't have any DH EXPORT ciphers configured" + fileout "$jsonID" "WARN" "Not tested. $OPENSSL doesn't support any DH EXPORT ciphers" "$cve" "$cwe" + out "$spaces" + openssl_no_expdhciphers=true + ;; + 1|2|3) addtl_warning=" ($magenta""tested w/ $nr_supported_ciphers/4 ciphers only!$off)" ;; + 4) ;; + esac + fi + + # test for DH export ciphers first + if "$using_sockets"; then + tls_sockets "03" "$exportdh_cipher_list_hex, 00,ff" + sclient_success=$? 
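 # tls_sockets() returning 2 only means the server downgraded the protocol version; for this check
 # that still counts as the export DH cipher having been accepted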
+ [[ $sclient_success -eq 2 ]] && sclient_success=0 + [[ $sclient_success -eq 0 ]] && vuln_exportdh_ciphers=true + elif [[ $nr_supported_ciphers -ne 0 ]]; then + $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -cipher $exportdh_cipher_list -connect $NODEIP:$PORT $PROXY $SNI") >$TMPFILE 2>$ERRFILE $TMPFILE 2>$ERRFILE we should treat that some place else before + fi + if [[ "$DH_GROUP_OFFERED" =~ Unknown ]]; then + subret=0 # no common DH key detected + else + subret=1 # known prime/DH key + fi + elif [[ -z "$DH_GROUP_OFFERED" ]]; then + subret=3 + fi + + # Now if we have DH export ciphers we print them out first + if "$vuln_exportdh_ciphers"; then + pr_svrty_high "VULNERABLE (NOT ok):"; out " uses DH EXPORT ciphers" + fileout "$jsonID" "HIGH" "VULNERABLE, uses DH EXPORT ciphers" "$cve" "$cwe" "$hint" + if [[ $subret -eq 3 ]]; then + out ", no DH key detected with <= TLS 1.2" + fileout "$jsonID2" "OK" "no DH key detected with <= TLS 1.2" + elif [[ $subret -eq 1 ]]; then + out "\n${spaces}" + out_common_prime "$jsonID2" "$cve" "$cwe" + elif [[ $subret -eq 0 ]]; then + out " no common primes detected" + fileout "$jsonID2" "INFO" "--" "$cve" "$cwe" + elif [[ $ret -eq 1 ]]; then + out "FIXME 1" + fi + else + if [[ $subret -eq 1 ]]; then + out_common_prime "$jsonID2" "$cve" "$cwe" + if ! "$openssl_no_expdhciphers"; then + outln "," + out "${spaces}but no DH EXPORT ciphers${addtl_warning}" + fileout "$jsonID" "OK" "not vulnerable, no DH EXPORT ciphers,$addtl_warning" "$cve" "$cwe" + fi + elif [[ $subret -eq 3 ]]; then + pr_svrty_good "not vulnerable (OK):"; out " no DH EXPORT ciphers${addtl_warning}" + fileout "$jsonID" "OK" "not vulnerable, no DH EXPORT ciphers,$addtl_warning" "$cve" "$cwe" + out ", no DH key detected with <= TLS 1.2" + fileout "$jsonID2" "OK" "no DH key with <= TLS 1.2" "$cve" "$cwe" + elif [[ $subret -eq 0 ]]; then + pr_svrty_good "not vulnerable (OK):"; out " no DH EXPORT ciphers${addtl_warning}" + fileout "$jsonID" "OK" "not vulnerable, no DH EXPORT ciphers,$addtl_warning" "$cve" "$cwe" + # we issue a special warning if there's no common prime but the bit length is too low + if [[ $DH_GROUP_LEN_P -le 1024 ]]; then + out "\n${spaces}But: " + pr_dh "$DH_GROUP_OFFERED" $DH_GROUP_LEN_P + case $? 
in + 1) fileout "$jsonID" "CRITICAL" "no DH EXPORT ciphers, no common prime but $DH_GROUP_OFFERED has only $DH_GROUP_LEN_P bits, $addtl_warning" "$cve" "$cwe" ;; + 2) fileout "$jsonID" "HIGH" "no DH EXPORT ciphers, no common prime but $DH_GROUP_OFFERED has only $DH_GROUP_LEN_P bits, $addtl_warning" "$cve" "$cwe";; + 3) fileout "$jsonID" "MEDIUM" "no DH EXPORT ciphers, no common prime but $DH_GROUP_OFFERED has only $DH_GROUP_LEN_P bits, $addtl_warning" "$cve" "$cwe";; + esac + else + out ", no common prime detected" + fileout "$jsonID2" "OK" "--" "$cve" "$cwe" + fi + elif [[ $ret -eq 1 ]]; then + pr_svrty_good "partly not vulnerable:"; out " no DH EXPORT ciphers${addtl_warning}" + fileout "$jsonID" "OK" "not vulnerable, no DH EXPORT ciphers,$addtl_warning" "$cve" "$cwe" + fi + fi + + outln + tmpfile_handle ${FUNCNAME[0]}.txt + return $ret +} + +# Decrypting RSA with Obsolete and Weakened eNcryption, more @ https://drownattack.com/ +run_drown() { + local -i nr_ciphers_detected ret=0 + local spaces=" " + local cert_fingerprint_sha2="" + local cve="CVE-2016-0800 CVE-2016-0703" + local cwe="CWE-310" + local hint="" + local jsonID="DROWN" + + if [[ $VULN_COUNT -le $VULN_THRESHLD ]]; then + outln + pr_headlineln " Testing for DROWN vulnerability " + outln + fi +# if we want to use OPENSSL: check for < openssl 1.0.2g, openssl 1.0.1s if native openssl + pr_bold " DROWN"; out " (${cve// /, }) " + + # Any fingerprint that is placed in $RSA_CERT_FINGERPRINT_SHA2 is also added to + # to $CERT_FINGERPRINT_SHA2, so if $CERT_FINGERPRINT_SHA2 is not empty, but + # $RSA_CERT_FINGERPRINT_SHA2 is empty, then the server doesn't have an RSA certificate. + if [[ -z "$CERT_FINGERPRINT_SHA2" ]]; then + get_host_cert "-cipher aRSA" + [[ $? -eq 0 ]] && cert_fingerprint_sha2="$($OPENSSL x509 -noout -in $HOSTCERT -fingerprint -sha256 2>>$ERRFILE | sed -e 's/^.*Fingerprint=//' -e 's/://g' )" + else + cert_fingerprint_sha2="$RSA_CERT_FINGERPRINT_SHA2" + cert_fingerprint_sha2=${cert_fingerprint_sha2/SHA256 /} + fi + + if ( [[ "$STARTTLS_PROTOCOL" =~ ldap ]] || [[ "$STARTTLS_PROTOCOL" =~ irc ]] ); then + prln_local_problem "STARTTLS/$STARTTLS_PROTOCOL and --ssl-native collide here" + return 1 + fi + + if [[ $(has_server_protocol ssl2) -ne 1 ]]; then + sslv2_sockets + else + [[ aaa == bbb ]] # provoke retrurn code=1 + fi + + case $? in + 7) # strange reply, couldn't convert the cipher spec length to a hex number + pr_fixme "strange v2 reply " + outln " (rerun with DEBUG >=2)" + [[ $DEBUG -ge 3 ]] && hexdump -C "$TEMPDIR/$NODEIP.sslv2_sockets.dd" | head -1 + fileout "$jsonID" "WARN" "received a strange SSLv2 reply (rerun with DEBUG>=2)" "$cve" "$cwe" + ret=1 + ;; + 3) # vulnerable, [[ -n "$cert_fingerprint_sha2" ]] test is not needed as we should have RSA certificate here + lines=$(count_lines "$(hexdump -C "$TEMPDIR/$NODEIP.sslv2_sockets.dd" 2>/dev/null)") + debugme tm_out " ($lines lines) " + add_tls_offered ssl2 yes + if [[ "$lines" -gt 1 ]]; then + nr_ciphers_detected=$((V2_HELLO_CIPHERSPEC_LENGTH / 3)) + if [[ 0 -eq "$nr_ciphers_detected" ]]; then + prln_svrty_high "CVE-2015-3197: SSLv2 supported but couldn't detect a cipher (NOT ok)"; + fileout "$jsonID" "HIGH" "SSLv2 offered, but could not detect a cipher. 
Make sure you don't use this certificate elsewhere, see https://censys.io/ipv4?q=$cert_fingerprint_sha2" "$cve CVE-2015-3197" "$cwe" "$hint" + else + prln_svrty_critical "VULNERABLE (NOT ok), SSLv2 offered with $nr_ciphers_detected ciphers"; + fileout "$jsonID" "CRITICAL" "VULNERABLE, SSLv2 offered with $nr_ciphers_detected ciphers. Make sure you don't use this certificate elsewhere, see https://censys.io/ipv4?q=$cert_fingerprint_sha2" "$cve" "$cwe" "$hint" + fi + outln "$spaces Make sure you don't use this certificate elsewhere, see:" + out "$spaces " + pr_url "https://censys.io/ipv4?q=$cert_fingerprint_sha2" + outln + fi + ;; + *) prln_svrty_best "not vulnerable on this host and port (OK)" + fileout "$jsonID" "OK" "not vulnerable on this host and port" "$cve" "$cwe" + if [[ -n "$cert_fingerprint_sha2" ]]; then + outln "$spaces make sure you don't use this certificate elsewhere with SSLv2 enabled services" + out "$spaces " + pr_url "https://censys.io/ipv4?q=$cert_fingerprint_sha2" + outln " could help you to find out" + fileout "${jsonID}_hint" "INFO" "Make sure you don't use this certificate elsewhere with SSLv2 enabled services, see https://censys.io/ipv4?q=$cert_fingerprint_sha2" "$cve" "$cwe" + else + outln "$spaces no RSA certificate, thus certificate can't be used with SSLv2 elsewhere" + fileout "${jsonID}_hint" "INFO" "no RSA certificate, can't be used with SSLv2 elsewhere" "$cve" "$cwe" + fi + ;; + esac + + return $ret +} + + + +# Browser Exploit Against SSL/TLS: don't use CBC Ciphers in SSLv3 TLSv1.0 +run_beast(){ + local hexc dash cbc_cipher sslvers auth mac export + local -a ciph hexcode normalized_hexcode kx enc export2 + local proto proto_hex + local -i i subret nr_ciphers=0 sclient_success=0 + local detected_cbc_ciphers="" ciphers_to_test + local higher_proto_supported="" + local vuln_beast=false + local spaces=" " + local cr=$'\n' + local first=true + local continued=false + local 
cbc_cipher_list="EXP-RC2-CBC-MD5:IDEA-CBC-SHA:EXP-DES-CBC-SHA:DES-CBC-SHA:DES-CBC3-SHA:EXP-DH-DSS-DES-CBC-SHA:DH-DSS-DES-CBC-SHA:DH-DSS-DES-CBC3-SHA:EXP-DH-RSA-DES-CBC-SHA:DH-RSA-DES-CBC-SHA:DH-RSA-DES-CBC3-SHA:EXP-EDH-DSS-DES-CBC-SHA:EDH-DSS-DES-CBC-SHA:EDH-DSS-DES-CBC3-SHA:EXP-EDH-RSA-DES-CBC-SHA:EDH-RSA-DES-CBC-SHA:EDH-RSA-DES-CBC3-SHA:EXP-ADH-DES-CBC-SHA:ADH-DES-CBC-SHA:ADH-DES-CBC3-SHA:KRB5-DES-CBC-SHA:KRB5-DES-CBC3-SHA:KRB5-IDEA-CBC-SHA:KRB5-DES-CBC-MD5:KRB5-DES-CBC3-MD5:KRB5-IDEA-CBC-MD5:EXP-KRB5-DES-CBC-SHA:EXP-KRB5-RC2-CBC-SHA:EXP-KRB5-DES-CBC-MD5:EXP-KRB5-RC2-CBC-MD5:AES128-SHA:DH-DSS-AES128-SHA:DH-RSA-AES128-SHA:DHE-DSS-AES128-SHA:DHE-RSA-AES128-SHA:ADH-AES128-SHA:AES256-SHA:DH-DSS-AES256-SHA:DH-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:ADH-AES256-SHA:CAMELLIA128-SHA:DH-DSS-CAMELLIA128-SHA:DH-RSA-CAMELLIA128-SHA:DHE-DSS-CAMELLIA128-SHA:DHE-RSA-CAMELLIA128-SHA:ADH-CAMELLIA128-SHA:EXP1024-RC2-CBC-MD5:EXP1024-DES-CBC-SHA:EXP1024-DHE-DSS-DES-CBC-SHA:CAMELLIA256-SHA:DH-DSS-CAMELLIA256-SHA:DH-RSA-CAMELLIA256-SHA:DHE-DSS-CAMELLIA256-SHA:DHE-RSA-CAMELLIA256-SHA:ADH-CAMELLIA256-SHA:PSK-3DES-EDE-CBC-SHA:PSK-AES128-CBC-SHA:PSK-AES256-CBC-SHA:DHE-PSK-3DES-EDE-CBC-SHA:DHE-PSK-AES128-CBC-SHA:DHE-PSK-AES256-CBC-SHA:RSA-PSK-3DES-EDE-CBC-SHA:RSA-PSK-AES128-CBC-SHA:RSA-PSK-AES256-CBC-SHA:SEED-SHA:DH-DSS-SEED-SHA:DH-RSA-SEED-SHA:DHE-DSS-SEED-SHA:DHE-RSA-SEED-SHA:ADH-SEED-SHA:PSK-AES128-CBC-SHA256:PSK-AES256-CBC-SHA384:DHE-PSK-AES128-CBC-SHA256:DHE-PSK-AES256-CBC-SHA384:RSA-PSK-AES128-CBC-SHA256:RSA-PSK-AES256-CBC-SHA384:ECDH-ECDSA-DES-CBC3-SHA:ECDH-ECDSA-AES128-SHA:ECDH-ECDSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA:ECDH-RSA-DES-CBC3-SHA:ECDH-RSA-AES128-SHA:ECDH-RSA-AES256-SHA:ECDHE-RSA-DES-CBC3-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:AECDH-DES-CBC3-SHA:AECDH-AES128-SHA:AECDH-AES256-SHA:SRP-3DES-EDE-CBC-SHA:SRP-RSA-3DES-EDE-CBC-SHA:SRP-DSS-3DES-EDE-CBC-SHA:SRP-AES-128-CBC-SHA:SRP-RSA-AES-128-CBC-SHA:SRP-DSS-AES-128-CBC-SHA:SRP-AES-256-CBC-SHA:SRP-RSA-AES-256-CBC-SHA:SRP-DSS-AES-256-CBC-SHA:ECDHE-PSK-3DES-EDE-CBC-SHA:ECDHE-PSK-AES128-CBC-SHA:ECDHE-PSK-AES256-CBC-SHA:ECDHE-PSK-AES128-CBC-SHA256:ECDHE-PSK-AES256-CBC-SHA384:PSK-CAMELLIA128-SHA256:PSK-CAMELLIA256-SHA384:DHE-PSK-CAMELLIA128-SHA256:DHE-PSK-CAMELLIA256-SHA384:RSA-PSK-CAMELLIA128-SHA256:RSA-PSK-CAMELLIA256-SHA384:ECDHE-PSK-CAMELLIA128-SHA256:ECDHE-PSK-CAMELLIA256-SHA384" + local cbc_ciphers_hex="00,06, 00,07, 00,08, 00,09, 00,0A, 00,0B, 00,0C, 00,0D, 00,0E, 00,0F, 00,10, 00,11, 00,12, 00,13, 00,14, 00,15, 00,16, 00,19, 00,1A, 00,1B, 00,1E, 00,1F, 00,21, 00,22, 00,23, 00,25, 00,26, 00,27, 00,29, 00,2A, 00,2F, 00,30, 00,31, 00,32, 00,33, 00,34, 00,35, 00,36, 00,37, 00,38, 00,39, 00,3A, 00,41, 00,42, 00,43, 00,44, 00,45, 00,46, 00,61, 00,62, 00,63, 00,84, 00,85, 00,86, 00,87, 00,88, 00,89, 00,8B, 00,8C, 00,8D, 00,8F, 00,90, 00,91, 00,93, 00,94, 00,95, 00,96, 00,97, 00,98, 00,99, 00,9A, 00,9B, 00,AE, 00,AF, 00,B2, 00,B3, 00,B6, 00,B7, C0,03, C0,04, C0,05, C0,08, C0,09, C0,0A, C0,0D, C0,0E, C0,0F, C0,12, C0,13, C0,14, C0,17, C0,18, C0,19, C0,1A, C0,1B, C0,1C, C0,1D, C0,1E, C0,1F, C0,21, C0,22, C0,34, C0,35, C0,36, C0,37, C0,38, C0,64, C0,65, C0,66, C0,67, C0,68, C0,69, C0,70, C0,71, C0,94, C0,95, C0,96, C0,97, C0,98, C0,99, C0,9A, C0,9B, FE,FE, FE,FF, FF,E0, FF,E1" + local has_dh_bits="$HAS_DH_BITS" + local using_sockets=true + local cve="CVE-2011-3389" + local cwe="CWE-20" + local hint="" + local jsonID="BEAST" + + if [[ $VULN_COUNT -le $VULN_THRESHLD ]]; then + outln + pr_headlineln " 
Testing for BEAST vulnerability " + outln + fi + pr_bold " BEAST"; out " ($cve) " + + if "$TLS13_ONLY" || ( [[ $(has_server_protocol ssl3) -eq 1 ]] && [[ $(has_server_protocol tls1) -eq 1 ]] ); then + pr_svrty_good "not vulnerable (OK)" + outln ", no SSL3 or TLS1" + fileout "$jsonID" "OK" "not vulnerable, no SSL3 or TLS1" "$cve" "$cwe" + return 0 + fi + + "$SSL_NATIVE" && using_sockets=false + # $cbc_ciphers_hex has 126 ciphers, we omitted SRP-AES-256-CBC-SHA bc the trailing 00,ff below will pose + # a problem for ACE loadbalancers otherwise. So in case we know this is not true, we'll re-add it + ! "$SERVER_SIZE_LIMIT_BUG" && "$using_sockets" && cbc_ciphers_hex="$cbc_ciphers_hex, C0,20" + + [[ $TLS_NR_CIPHERS == 0 ]] && using_sockets=false + if "$using_sockets" || [[ $OSSL_VER_MAJOR -lt 1 ]]; then + for (( i=0; i < TLS_NR_CIPHERS; i++ )); do + hexc="${TLS_CIPHER_HEXCODE[i]}" + if [[ ${#hexc} -eq 9 ]] && [[ "${TLS_CIPHER_RFC_NAME[i]}" =~ CBC ]] && \ + [[ ! "${TLS_CIPHER_RFC_NAME[i]}" =~ SHA256 ]] && [[ ! "${TLS_CIPHER_RFC_NAME[i]}" =~ SHA384 ]]; then + ciph[nr_ciphers]="${TLS_CIPHER_OSSL_NAME[i]}" + hexcode[nr_ciphers]="${hexc:2:2},${hexc:7:2}" + rfc_ciph[nr_ciphers]="${TLS_CIPHER_RFC_NAME[i]}" + kx[nr_ciphers]="${TLS_CIPHER_KX[i]}" + enc[nr_ciphers]="${TLS_CIPHER_ENC[i]}" + export2[nr_ciphers]="${TLS_CIPHER_EXPORT[i]}" + ossl_supported[nr_ciphers]=${TLS_CIPHER_OSSL_SUPPORTED[i]} + if "$using_sockets" && "$WIDE" && ! "$has_dh_bits" && \ + ( [[ ${kx[nr_ciphers]} == Kx=ECDH ]] || [[ ${kx[nr_ciphers]} == Kx=DH ]] || [[ ${kx[nr_ciphers]} == Kx=EDH ]] ); then + ossl_supported[nr_ciphers]=false + fi + if [[ "${hexc:2:2}" == 00 ]]; then + normalized_hexcode[nr_ciphers]="x${hexc:7:2}" + else + normalized_hexcode[nr_ciphers]="x${hexc:2:2}${hexc:7:2}" + fi + nr_ciphers+=1 + fi + done + else + # no sockets, openssl + while read hexc dash ciph[nr_ciphers] sslvers kx[nr_ciphers] auth enc[nr_ciphers] mac export2[nr_ciphers]; do + if [[ ":${cbc_cipher_list}:" =~ :${ciph[nr_ciphers]}: ]]; then + ossl_supported[nr_ciphers]=true + if [[ "${hexc:2:2}" == "00" ]]; then + normalized_hexcode[nr_ciphers]="x${hexc:7:2}" + else + normalized_hexcode[nr_ciphers]="x${hexc:2:2}${hexc:7:2}" + fi + nr_ciphers+=1 + fi + done < <(actually_supported_osslciphers 'ALL:COMPLEMENTOFALL:@STRENGTH' 'ALL' "-tls1 -V") + fi + + # first determine whether it's mitigated by higher protocols + for proto in tls1_1 tls1_2; do + subret=$(has_server_protocol "$proto") + if [[ $subret -eq 0 ]]; then + case $proto in + tls1_1) higher_proto_supported+=" TLSv1.1" ;; + tls1_2) higher_proto_supported+=" TLSv1.2" ;; + esac + elif [[ $subret -eq 2 ]]; then + $OPENSSL s_client $(s_client_options "-state -"${proto}" $STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $SNI") 2>>$ERRFILE >$TMPFILE $TMPFILE 2>>$ERRFILE $TMPFILE 2>>$ERRFILE $TMPFILE 2>>$ERRFILE 1.0 + pr_svrty_low "VULNERABLE" + outln " -- but also supports higher protocols (possible mitigation) $higher_proto_supported" + outln + else + out "$spaces" + pr_svrty_low "VULNERABLE" + outln " -- but also supports higher protocols $higher_proto_supported (likely mitigated)" + fi + fileout "$jsonID" "LOW" "VULNERABLE -- but also supports higher protocols $higher_proto_supported (likely mitigated)" "$cve" "$cwe" "$hint" + else + if "$WIDE"; then + outln + else + out "$spaces" + fi + pr_svrty_medium "VULNERABLE" + outln " -- and no higher protocols as mitigation supported" + fileout "$jsonID" "MEDIUM" "VULNERABLE -- and no higher protocols as mitigation supported" "$cve" "$cwe" "$hint" + fi + fi + 
"$first" && ! "$vuln_beast" && prln_svrty_good "no CBC ciphers found for any protocol (OK)" + + "$using_sockets" && HAS_DH_BITS="$has_dh_bits" + tmpfile_handle ${FUNCNAME[0]}.txt + return 0 +} + + +# https://web.archive.org/web/20200324101422/http://www.isg.rhul.ac.uk/tls/Lucky13.html +# in a nutshell: don't offer CBC suites (again). MAC as a fix for padding oracles is not enough. Best: TLS v1.2+ AES GCM +run_lucky13() { + local spaces=" " + local cbc_ciphers="ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:SRP-DSS-AES-256-CBC-SHA:SRP-RSA-AES-256-CBC-SHA:SRP-AES-256-CBC-SHA:RSA-PSK-AES256-CBC-SHA384:DHE-PSK-AES256-CBC-SHA384:DHE-PSK-AES256-CBC-SHA:ECDHE-PSK-CAMELLIA256-SHA384:RSA-PSK-CAMELLIA256-SHA384:DHE-PSK-CAMELLIA256-SHA384:PSK-AES256-CBC-SHA384:PSK-CAMELLIA256-SHA384:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DH-RSA-AES256-SHA256:DH-DSS-AES256-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DH-RSA-AES256-SHA:DH-DSS-AES256-SHA:ECDHE-RSA-CAMELLIA256-SHA384:ECDHE-ECDSA-CAMELLIA256-SHA384:DHE-RSA-CAMELLIA256-SHA256:DHE-DSS-CAMELLIA256-SHA256:DH-RSA-CAMELLIA256-SHA256:DH-DSS-CAMELLIA256-SHA256:DHE-RSA-CAMELLIA256-SHA:DHE-DSS-CAMELLIA256-SHA:DH-RSA-CAMELLIA256-SHA:DH-DSS-CAMELLIA256-SHA:AECDH-AES256-SHA:ADH-AES256-SHA256:ADH-AES256-SHA:ADH-CAMELLIA256-SHA256:ADH-CAMELLIA256-SHA:ECDH-RSA-AES256-SHA384:ECDH-ECDSA-AES256-SHA384:ECDH-RSA-AES256-SHA:ECDH-ECDSA-AES256-SHA:ECDH-RSA-CAMELLIA256-SHA384:ECDH-ECDSA-CAMELLIA256-SHA384:AES256-SHA256:AES256-SHA:CAMELLIA256-SHA256:ECDHE-PSK-AES256-CBC-SHA384:ECDHE-PSK-AES256-CBC-SHA:CAMELLIA256-SHA:RSA-PSK-AES256-CBC-SHA:PSK-AES256-CBC-SHA:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:SRP-DSS-AES-128-CBC-SHA:SRP-RSA-AES-128-CBC-SHA:SRP-AES-128-CBC-SHA:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DH-RSA-AES128-SHA256:DH-DSS-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:DH-RSA-AES128-SHA:DH-DSS-AES128-SHA:ECDHE-RSA-CAMELLIA128-SHA256:ECDHE-ECDSA-CAMELLIA128-SHA256:DHE-RSA-CAMELLIA128-SHA256:DHE-DSS-CAMELLIA128-SHA256:DH-RSA-CAMELLIA128-SHA256:DH-DSS-CAMELLIA128-SHA256:DHE-RSA-SEED-SHA:DHE-DSS-SEED-SHA:DH-RSA-SEED-SHA:DH-DSS-SEED-SHA:DHE-RSA-CAMELLIA128-SHA:DHE-DSS-CAMELLIA128-SHA:DH-RSA-CAMELLIA128-SHA:DH-DSS-CAMELLIA128-SHA:AECDH-AES128-SHA:ADH-AES128-SHA256:ADH-AES128-SHA:ADH-CAMELLIA128-SHA256:ADH-SEED-SHA:ADH-CAMELLIA128-SHA:ECDH-RSA-AES128-SHA256:ECDH-ECDSA-AES128-SHA256:ECDH-RSA-AES128-SHA:ECDH-ECDSA-AES128-SHA:ECDH-RSA-CAMELLIA128-SHA256:ECDH-ECDSA-CAMELLIA128-SHA256:AES128-SHA256:AES128-SHA:CAMELLIA128-SHA256:ECDHE-PSK-AES128-CBC-SHA256:ECDHE-PSK-AES128-CBC-SHA:RSA-PSK-AES128-CBC-SHA256:DHE-PSK-AES128-CBC-SHA256:DHE-PSK-AES128-CBC-SHA:SEED-SHA:CAMELLIA128-SHA:ECDHE-PSK-CAMELLIA128-SHA256:RSA-PSK-CAMELLIA128-SHA256:DHE-PSK-CAMELLIA128-SHA256:PSK-AES128-CBC-SHA256:PSK-CAMELLIA128-SHA256:IDEA-CBC-SHA:RSA-PSK-AES128-CBC-SHA:PSK-AES128-CBC-SHA:KRB5-IDEA-CBC-SHA:KRB5-IDEA-CBC-MD5:ECDHE-RSA-DES-CBC3-SHA:ECDHE-ECDSA-DES-CBC3-SHA:SRP-DSS-3DES-EDE-CBC-SHA:SRP-RSA-3DES-EDE-CBC-SHA:SRP-3DES-EDE-CBC-SHA:EDH-RSA-DES-CBC3-SHA:EDH-DSS-DES-CBC3-SHA:DH-RSA-DES-CBC3-SHA:DH-DSS-DES-CBC3-SHA:AECDH-DES-CBC3-SHA:ADH-DES-CBC3-SHA:ECDH-RSA-DES-CBC3-SHA:ECDH-ECDSA-DES-CBC3-SHA:DES-CBC3-SHA:RSA-PSK-3DES-EDE-CBC-SHA:PSK-3DES-EDE-CBC-SHA:KRB5-DES-CBC3-SHA:KRB5-DES-CBC3-MD5:ECDHE-PSK-3DES-EDE-CBC-SHA:DHE-PSK-3DES-EDE-CBC-SHA:EXP1024-DHE-DSS-DES-CBC-SHA:EDH-RSA-DES-CBC-SHA:EDH-DSS-DES-CBC-SHA:DH-RSA-DES-CBC-SHA:DH-DSS-DES-CBC-SHA:ADH-DES-CBC-SHA:EXP1024-DES-CBC-SHA:DES-CBC-SHA:KRB5-DE
S-CBC-SHA:KRB5-DES-CBC-MD5:EXP-EDH-RSA-DES-CBC-SHA:EXP-EDH-DSS-DES-CBC-SHA:EXP-ADH-DES-CBC-SHA:EXP-DES-CBC-SHA:EXP-RC2-CBC-MD5:EXP-KRB5-RC2-CBC-SHA:EXP-KRB5-DES-CBC-SHA:EXP-KRB5-RC2-CBC-MD5:EXP-KRB5-DES-CBC-MD5:EXP-DH-DSS-DES-CBC-SHA:EXP-DH-RSA-DES-CBC-SHA" + cbc_ciphers_hex1="c0,28, c0,24, c0,14, c0,0a, c0,22, c0,21, c0,20, 00,b7, 00,b3, 00,91, c0,9b, c0,99, c0,97, 00,af, c0,95, 00,6b, 00,6a, 00,69, 00,68, 00,39, 00,38, 00,37, 00,36, c0,77, c0,73, 00,c4, 00,c3, 00,c2, 00,c1, 00,88, 00,87, 00,86, 00,85, c0,19, 00,6d, 00,3a, 00,c5, 00,89, c0,2a, c0,26, c0,0f, c0,05, c0,79, c0,75, 00,3d, 00,35, 00,c0, c0,38, c0,36, 00,84, 00,95, 00,8d, c0,3d, c0,3f, c0,41, c0,43, c0,45, c0,47, c0,49, c0,4b, c0,4d, c0,4f, c0,65, c0,67, c0,69, c0,71, c0,27, c0,23, c0,13, c0,09, c0,1f, c0,1e, c0,1d, 00,67, 00,40, 00,3f, 00,3e, 00,33, 00,32, 00,31, 00,30, c0,76, c0,72, 00,be, 00,bd, 00,bc, 00,bb, 00,9a, 00,99, 00,98, 00,97, 00,45, 00,44, 00,43, 00,42, c0,18, 00,6c, 00,34, 00,bf, 00,9b, 00,46, c0,29, c0,25, c0,0e, c0,04, c0,78, c0,74, 00,3c, 00,2f, 00,ba" + cbc_ciphers_hex2="c0,37, c0,35, 00,b6, 00,b2, 00,90, 00,96, 00,41, c0,9a, c0,98, c0,96, 00,ae, c0,94, 00,07, 00,94, 00,8c, 00,21, 00,25, c0,3c, c0,3e, c0,40, c0,42, c0,44, c0,46, c0,48, c0,4a, c0,4c, c0,4e, c0,64, c0,66, c0,68, c0,70, c0,12, c0,08, c0,1c, c0,1b, c0,1a, 00,16, 00,13, 00,10, 00,0d, c0,17, 00,1b, c0,0d, c0,03, 00,0a, 00,93, 00,8b, 00,1f, 00,23, c0,34, 00,8f, fe,ff, ff,e0, 00,63, 00,15, 00,12, 00,0f, 00,0c, 00,1a, 00,62, 00,09, 00,61, 00,1e, 00,22, fe,fe, ff,e1, 00,14, 00,11, 00,19, 00,08, 00,06, 00,27, 00,26, 00,2a, 00,29, 00,0b, 00,0e" + local has_dh_bits="$HAS_DH_BITS" + local -i nr_supported_ciphers=0 sclient_success + local using_sockets=true + local cve="CVE-2013-0169" + local cwe="CWE-310" + local hint="" + local jsonID="LUCKY13" + + if [[ $VULN_COUNT -le $VULN_THRESHLD ]]; then + outln + pr_headlineln " Testing for LUCKY13 vulnerability " + outln + fi + pr_bold " LUCKY13"; out " ($cve), experimental " + + if "$TLS13_ONLY"; then + pr_svrty_best "not vulnerable (OK)" + [[ $DEBUG -ge 1 ]] && out ", no CBC ciphers in TLS 1.3 only servers" + outln + fileout "$jsonID" "OK" "not vulnerable, TLS 1.3 only" "$cve" "$cwe" + return 0 + fi + + "$SSL_NATIVE" && using_sockets=false + # The openssl binary distributed has almost everything we need (PSK, KRB5 ciphers and feff, ffe0 are typically missing). + # Measurements show that there's little impact whether we use sockets or TLS here, so the default is sockets here + + if "$using_sockets"; then + tls_sockets "03" "${cbc_ciphers_hex1}, 00,ff" + sclient_success=$? + [[ "$sclient_success" -eq 2 ]] && sclient_success=0 + if [[ $sclient_success -ne 0 ]]; then + tls_sockets "03" "${cbc_ciphers_hex2}, 00,ff" + sclient_success=$? + [[ $sclient_success -eq 2 ]] && sclient_success=0 + fi + else + nr_cbc_ciphers=$(count_ciphers $cbc_ciphers) + nr_supported_ciphers=$(count_ciphers $(actually_supported_osslciphers $cbc_ciphers)) + $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -no_ssl2 -cipher $cbc_ciphers -connect $NODEIP:$PORT $PROXY $SNI") >$TMPFILE 2>$ERRFILE >$ERRFILE) + fi + + if "$using_sockets" && [[ -n "$sslv2_ciphers_hex" ]]; then + sslv2_sockets "${sslv2_ciphers_hex:2}" "true" + if [[ $? -eq 3 ]] && [[ "$V2_HELLO_CIPHERSPEC_LENGTH" -ne 0 ]]; then + supported_sslv2_ciphers="$(grep "Supported cipher: " "$TEMPDIR/$NODEIP.parse_sslv2_serverhello.txt")" + "$WIDE" && "$SHOW_SIGALGO" && s="$(read_sigalg_from_file "$HOSTCERT")" + for (( i=0 ; i$TMPFILE 2>$ERRFILE $TMPFILE 2>$ERRFILE 00 ." 
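+     # (Layout as constructed in the case statement below: 00 02 | non-zero random padding |
+     #  00 | two-byte TLS version | random pre-master secret bytes.)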
+ # However, for each test except testnum=0 the padding will be + # made incorrect in some way, as specified below. + + # Determine the length of the public key and create the bytes. + # should be a length that makes total length of $padded_pms + # the same as the length of the public key. should contain no 00 bytes. + pubkeybits="$($OPENSSL x509 -noout -pubkey -in $HOSTCERT 2>>$ERRFILE | \ + $OPENSSL pkey -pubin -text 2>>$ERRFILE | awk -F'(' '/Public-Key/ { print $2 }')" + pubkeybits="${pubkeybits%%bit*}" + pubkeybytes=$pubkeybits/8 + [[ $((pubkeybits%8)) -ne 0 ]] && pubkeybytes+=1 + rnd_pad="" + for (( len=0; len < pubkeybytes-52; len=len+2 )); do + rnd_pad+="abcd" + done + [[ $len -eq $pubkeybytes-52 ]] && rnd_pad+="ab" + + case "$testnum" in + # correct padding + 0) padded_pms="0002${rnd_pad}00${DETECTED_TLS_VERSION}${rnd_pms}" ;; + # wrong first two bytes + 1) padded_pms="4117${rnd_pad}00${DETECTED_TLS_VERSION}${rnd_pms}" ;; + # 0x00 on a wrong position + 2) padded_pms="0002${rnd_pad}11${rnd_pms}0011" ;; + # no 0x00 in the middle + 3) padded_pms="0002${rnd_pad}111111${rnd_pms}" ;; + # wrong version number (according to Klima / Pokorny / Rosa paper) + 4) padded_pms="0002${rnd_pad}000202${rnd_pms}" ;; + esac + + # Encrypt the padded premaster secret using the server's public key. + encrypted_pms="$(asciihex_to_binary "$padded_pms" | \ + $OPENSSL pkeyutl -encrypt -certin -inkey $HOSTCERT -pkeyopt rsa_padding_mode:none 2>/dev/null | \ + hexdump -v -e '16/1 "%02x"')" + if [[ -z "$encrypted_pms" ]]; then + if [[ "$DETECTED_TLS_VERSION" == "0300" ]]; then + socksend ",x15, x03, x00, x00, x02, x02, x00" 0 + else + socksend ",x15, x03, x01, x00, x02, x02, x00" 0 + fi + close_socket + prln_fixme "Conversion of public key failed around line $((LINENO - 9))" + fileout "$jsonID" "WARN" "Conversion of public key failed around line $((LINENO - 10)) " + return 1 + fi + + # Create the client key exchange message. + len=${#encrypted_pms}/2 + cke_prefix="16${DETECTED_TLS_VERSION}$(printf "%04x" $((len+6)))10$(printf "%06x" $((len+2)))$(printf "%04x" $len)" + encrypted_pms="$cke_prefix$encrypted_pms" + len=${#encrypted_pms} + client_key_exchange="" + for (( i=0; i/dev/null ; then + readlink -f ls &>/dev/null && \ + TESTSSL_INSTALL_DIR="$(readlink -f "$(basename "${BASH_SOURCE[0]}")")" || \ + TESTSSL_INSTALL_DIR="$(readlink "$(basename "${BASH_SOURCE[0]}")")" + # not sure whether Darwin has -f + TESTSSL_INSTALL_DIR="$(dirname "$TESTSSL_INSTALL_DIR" 2>/dev/null)" + [[ -r "$TESTSSL_INSTALL_DIR/cipher-mapping.txt" ]] && CIPHERS_BY_STRENGTH_FILE="$TESTSSL_INSTALL_DIR/cipher-mapping.txt" + [[ -r "$TESTSSL_INSTALL_DIR/etc/testssl/cipher-mapping.txt" ]] && CIPHERS_BY_STRENGTH_FILE="$TESTSSL_INSTALL_DIR/etc/testssl/cipher-mapping.txt" + fi + + # still no cipher mapping file: + if [[ ! -r "$CIPHERS_BY_STRENGTH_FILE" ]] && type -p realpath &>/dev/null ; then + TESTSSL_INSTALL_DIR="$(dirname "$(realpath "${BASH_SOURCE[0]}")")" + CIPHERS_BY_STRENGTH_FILE="$TESTSSL_INSTALL_DIR/etc/testssl/cipher-mapping.txt" + [[ -r "$TESTSSL_INSTALL_DIR/cipher-mapping.txt" ]] && CIPHERS_BY_STRENGTH_FILE="$TESTSSL_INSTALL_DIR/cipher-mapping.txt" + fi + + # still no cipher mapping file (and realpath is not present): + if [[ ! 
-r "$CIPHERS_BY_STRENGTH_FILE" ]] && type -p readlink &>/dev/null ; then + readlink -f ls &>/dev/null && \ + TESTSSL_INSTALL_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" || \ + TESTSSL_INSTALL_DIR="$(dirname "$(readlink "${BASH_SOURCE[0]}")")" + # not sure whether Darwin has -f + CIPHERS_BY_STRENGTH_FILE="$TESTSSL_INSTALL_DIR/etc/testssl/cipher-mapping.txt" + [[ -r "$TESTSSL_INSTALL_DIR/cipher-mapping.txt" ]] && CIPHERS_BY_STRENGTH_FILE="$TESTSSL_INSTALL_DIR/cipher-mapping.txt" + fi + + if [[ ! -r "$CIPHERS_BY_STRENGTH_FILE" ]]; then + DISPLAY_CIPHERNAMES="openssl-only" + debugme echo "$CIPHERS_BY_STRENGTH_FILE" + prln_warning "\nATTENTION: No cipher mapping file found!" + outln "Please note from 2.9 on $PROG_NAME needs files in \"\$TESTSSL_INSTALL_DIR/etc/testssl/\" to function correctly." + outln + ignore_no_or_lame "Type \"yes\" to ignore this warning and proceed at your own risk" "yes" + [[ $? -ne 0 ]] && exit $ERR_RESOURCE + fi + + TLS_DATA_FILE="$TESTSSL_INSTALL_DIR/etc/testssl/tls_data.txt" + if [[ ! -r "$TLS_DATA_FILE" ]]; then + prln_warning "\nATTENTION: No TLS data file found -- needed for socket-based handshakes" + outln "Please note from 2.9 on $PROG_NAME needs files in \"\$TESTSSL_INSTALL_DIR/etc/testssl/\" to function correctly." + outln + ignore_no_or_lame "Type \"yes\" to ignore this warning and proceed at your own risk" "yes" + [[ $? -ne 0 ]] && exit $ERR_RESOURCE + else + : # see #705, in a nutshell: not portable to initialize a global array inside a function. Thus it'll be done in main part below + fi +} + + +test_openssl_suffix() { + local naming_ext="$(uname).$(uname -m)" + local uname_arch="$(uname -m)" + local myarch_suffix="" + + [[ $uname_arch =~ 64 ]] && myarch_suffix=64 || myarch_suffix=32 + if [[ -f "$1/openssl" ]] && [[ -x "$1/openssl" ]]; then + OPENSSL="$1/openssl-bad" + return 0 + elif [[ -f "$1/openssl.$naming_ext" ]] && [[ -x "$1/openssl.$naming_ext" ]]; then + OPENSSL="$1/openssl.$naming_ext" + return 0 + elif [[ -f "$1/openssl.$uname_arch" ]] && [[ -x "$1/openssl.$uname_arch" ]]; then + OPENSSL="$1/openssl.$uname_arch" + return 0 + elif [[ -f "$1/openssl$myarch_suffix" ]] && [[ -x "$1/openssl$myarch_suffix" ]]; then + OPENSSL="$1/openssl$myarch_suffix" + return 0 + fi + return 1 +} + + +find_openssl_binary() { + local s_client_has=$TEMPDIR/s_client_has.txt + local s_client_starttls_has=$TEMPDIR/s_client_starttls_has.txt + local openssl_location cwd="" + local ossl_wo_dev_info + local curve + local -a curves_ossl=("sect163k1" "sect163r1" "sect163r2" "sect193r1" "sect193r2" "sect233k1" "sect233r1" "sect239k1" "sect283k1" "sect283r1" "sect409k1" "sect409r1" "sect571k1" "sect571r1" "secp160k1" "secp160r1" "secp160r2" "secp192k1" "prime192v1" "secp224k1" "secp224r1" "secp256k1" "prime256v1" "secp384r1" "secp521r1" "brainpoolP256r1" "brainpoolP384r1" "brainpoolP512r1" "X25519" "X448") + + # 0. check environment variable whether it's executable + if [[ -n "$OPENSSL" ]] && [[ ! -x "$OPENSSL" ]]; then + prln_warning "\ncannot find specified (\$OPENSSL=$OPENSSL) binary." + tmln_out " Looking some place else ..." + elif [[ -x "$OPENSSL" ]]; then + : # 1. all ok supplied $OPENSSL was found and has executable bit set -- testrun comes below + elif [[ -e "/mnt/c/Windows/System32/bash.exe" ]] && test_openssl_suffix "$(dirname "$(type -p openssl)")"; then + # 2. otherwise, only if on Bash on Windows, use system binaries only. + SYSTEM2="WSL" + elif test_openssl_suffix "$TESTSSL_INSTALL_DIR"; then + : # 3. 
otherwise try openssl in path of testssl.sh + elif test_openssl_suffix "$TESTSSL_INSTALL_DIR/bin"; then + : # 4. otherwise here, this is supposed to be the standard --platform independent path in the future!!! + elif test_openssl_suffix "$(dirname "$(type -p openssl)")"; then + : # 5. we tried hard and failed, so now we use the system binaries + fi + + # no ERRFILE initialized yet, thus we use /dev/null for stderr directly + $OPENSSL version -a 2>/dev/null >/dev/null + if [[ $? -ne 0 ]] || [[ ! -x "$OPENSSL" ]]; then + fatal "cannot exec or find any openssl binary" $ERR_OSSLBIN + fi + + # https://www.openssl.org/news/openssl-notes.html + OSSL_NAME=$($OPENSSL version 2>/dev/null | awk '{ print $1 }') + OSSL_VER=$($OPENSSL version 2>/dev/null | awk -F' ' '{ print $2 }') + OSSL_VER_MAJOR="${OSSL_VER%%\.*}" + ossl_wo_dev_info="${OSSL_VER%%-*}" + OSSL_VER_MINOR="${ossl_wo_dev_info#$OSSL_VER_MAJOR\.}" + OSSL_VER_MINOR="${OSSL_VER_MINOR%%[a-zA-Z]*}" + OSSL_VER_APPENDIX="${OSSL_VER#$OSSL_VER_MAJOR\.$OSSL_VER_MINOR}" + OSSL_VER_PLATFORM=$($OPENSSL version -p 2>/dev/null | sed 's/^platform: //') + OSSL_BUILD_DATE=$($OPENSSL version -a 2>/dev/null | grep '^built' | sed -e 's/built on//' -e 's/: ... //' -e 's/: //' -e 's/ UTC//' -e 's/ +0000//' -e 's/.000000000//') + + # see #190, reverting logic: unless otherwise proved openssl has no dh bits + case "$OSSL_VER_MAJOR.$OSSL_VER_MINOR" in + 1.0.2|1.1.0|1.1.1|3.0.0) HAS_DH_BITS=true ;; + esac + if [[ "$OSSL_NAME" =~ LibreSSL ]]; then + [[ ${OSSL_VER//./} -ge 210 ]] && HAS_DH_BITS=true + if "$SSL_NATIVE"; then + outln + pr_warning "LibreSSL in native ssl mode is not a good choice for testing INSECURE features!" + fi + fi + + initialize_engine + + openssl_location="$(type -p $OPENSSL)" + [[ -n "$GIT_REL" ]] && \ + cwd="$PWD" || \ + cwd="$RUN_DIR" + if [[ "$openssl_location" == ${PWD}/bin ]]; then + OPENSSL_LOCATION="\$PWD/bin/$(basename "$openssl_location")" + elif [[ "$openssl_location" =~ $cwd ]] && [[ "$cwd" != '.' ]]; then + OPENSSL_LOCATION="${openssl_location%%$cwd}" + else + OPENSSL_LOCATION="$openssl_location" + fi + + OSSL_CIPHERS_S="" + HAS_SSL2=false + HAS_SSL3=false + HAS_TLS13=false + HAS_X448=false + HAS_X25519=false + HAS_NO_SSL2=false + HAS_NOSERVERNAME=false + HAS_CIPHERSUITES=false + HAS_COMP=false + HAS_NO_COMP=false + HAS_CURVES=false + OSSL_SUPPORTED_CURVES="" + HAS_PKEY=false + HAS_PKUTIL=false + HAS_ALPN=false + HAS_NPN=false + HAS_FALLBACK_SCSV=false + HAS_PROXY=false + HAS_XMPP=false + HAS_POSTGRES=false + HAS_MYSQL=false + HAS_LMTP=false + HAS_NNTP=false + HAS_IRC=false + HAS_CHACHA20=false + HAS_AES128_GCM=false + HAS_AES256_GCM=false + HAS_ZLIB=false + + $OPENSSL ciphers -s 2>&1 | grep -aiq "unknown option" || \ + OSSL_CIPHERS_S="-s" + + # This and all other occurences we do a little trick using "invalid." to avoid plain and + # link level DNS lookups. See issue #1418 and https://tools.ietf.org/html/rfc6761#section-6.4 + $OPENSSL s_client -ssl2 -connect invalid. 2>&1 | grep -aiq "unknown option" || \ + HAS_SSL2=true + + $OPENSSL s_client -ssl3 -connect invalid. 2>&1 | grep -aiq "unknown option" || \ + HAS_SSL3=true + + $OPENSSL s_client -tls1_3 -connect invalid. 2>&1 | grep -aiq "unknown option" || \ + HAS_TLS13=true + + $OPENSSL genpkey -algorithm X448 2>&1 | grep -aq "not found" || \ + HAS_X448=true + + $OPENSSL genpkey -algorithm X25519 2>&1 | grep -aq "not found" || \ + HAS_X25519=true + + $OPENSSL s_client -no_ssl2 -connect invalid. 
2>&1 | grep -aiq "unknown option" || \ + HAS_NO_SSL2=true + + $OPENSSL s_client -noservername -connect invalid. 2>&1 | grep -aiq "unknown option" || \ + HAS_NOSERVERNAME=true + + $OPENSSL s_client -ciphersuites -connect invalid. 2>&1 | grep -aiq "unknown option" || \ + HAS_CIPHERSUITES=true + + $OPENSSL s_client -comp -connect invalid. 2>&1 | grep -aiq "unknown option" || \ + HAS_COMP=true + + $OPENSSL s_client -no_comp -connect invalid. 2>&1 | grep -aiq "unknown option" || \ + HAS_NO_COMP=true + + OPENSSL_NR_CIPHERS=$(count_ciphers "$(actually_supported_osslciphers 'ALL:COMPLEMENTOFALL' 'ALL')") + + if $OPENSSL s_client -curves "${curves_ossl[0]}" -connect invalid. 2>&1 | grep -aiq "unknown option"; then + for curve in "${curves_ossl[@]}"; do + $OPENSSL s_client -groups $curve -connect invalid.:8443 2>&1 | grep -Eiaq "Error with command|unknown option|Failed to set groups" + [[ $? -ne 0 ]] && OSSL_SUPPORTED_CURVES+=" $curve " + done + else + HAS_CURVES=true + for curve in "${curves_ossl[@]}"; do + $OPENSSL s_client -curves $curve -connect invalid. 2>&1 | grep -Eiaq "Error with command|unknown option" + [[ $? -ne 0 ]] && OSSL_SUPPORTED_CURVES+=" $curve " + done + fi + + $OPENSSL pkey -help 2>&1 | grep -q Error || \ + HAS_PKEY=true + + $OPENSSL pkeyutl 2>&1 | grep -q Error || \ + HAS_PKUTIL=true + + # For the following we feel safe enough to query the s_client help functions. + # That was not good enough for the previous lookups + $OPENSSL s_client -help 2>$s_client_has + + $OPENSSL s_client -starttls foo 2>$s_client_starttls_has + + grep -qw '\-alpn' $s_client_has && \ + HAS_ALPN=true + + grep -qw '\-nextprotoneg' $s_client_has && \ + HAS_NPN=true + + grep -qw '\-fallback_scsv' $s_client_has && \ + HAS_FALLBACK_SCSV=true + + grep -q '\-proxy' $s_client_has && \ + HAS_PROXY=true + + grep -q '\-xmpp' $s_client_has && \ + HAS_XMPP=true + + grep -q 'postgres' $s_client_starttls_has && \ + HAS_POSTGRES=true + + grep -q 'mysql' $s_client_starttls_has && \ + HAS_MYSQL=true + + grep -q 'lmtp' $s_client_starttls_has && \ + HAS_LMTP=true + + grep -q 'nntp' $s_client_starttls_has && \ + HAS_NNTP=true + + grep -q 'irc' $s_client_starttls_has && \ + HAS_IRC=true + + $OPENSSL enc -chacha20 -K 12345678901234567890123456789012 -iv 01000000123456789012345678901234 > /dev/null 2> /dev/null <<< "test" + [[ $? -eq 0 ]] && HAS_CHACHA20=true + + $OPENSSL enc -aes-128-gcm -K 0123456789abcdef0123456789abcdef -iv 0123456789abcdef01234567 > /dev/null 2> /dev/null <<< "test" + [[ $? -eq 0 ]] && HAS_AES128_GCM=true + + $OPENSSL enc -aes-256-gcm -K 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef -iv 0123456789abcdef01234567 > /dev/null 2> /dev/null <<< "test" + [[ $? -eq 0 ]] && HAS_AES256_GCM=true + + [[ "$(echo -e "\x78\x9C\xAB\xCA\xC9\x4C\xE2\x02\x00\x06\x20\x01\xBC" | $OPENSSL zlib -d 2>/dev/null)" == zlib ]] && HAS_ZLIB=true + + if [[ -n "$CONNECT_TIMEOUT" ]] || [[ -n "$OPENSSL_TIMEOUT" ]]; then + # We don't set a general timeout as we might not have "timeout" installed and we only + # do what is instructed. Thus we check first what the command line params were, + # then we proceed + if type -p timeout >/dev/null 2>&1; then + # There are different versions of "timeout". 
Check whether --preserve-status is supported + if timeout --help 2>/dev/null | grep -q 'preserve-status'; then + TIMEOUT_CMD="timeout --preserve-status" + else + TIMEOUT_CMD="timeout" + fi + else + TIMEOUT_CMD="" + outln + fatal "You specified a connect or openssl timeout but the binary \"timeout\" couldn't be found " $ERR_RESOURCE + fi + fi + + if ! "$do_mass_testing"; then + if [[ -n $OPENSSL_TIMEOUT ]]; then + OPENSSL="$TIMEOUT_CMD $OPENSSL_TIMEOUT $OPENSSL" + fi + fi + + return 0 +} + + +check4openssl_oldfarts() { + case "$OSSL_VER" in + 0.9.7*|0.9.6*|0.9.5*) + # 0.9.5a was latest in 0.9.5 an released 2000/4/1, that'll NOT suffice for this test + old_fart ;; + 0.9.8) + case $OSSL_VER_APPENDIX in + a|b|c|d|e) old_fart;; # no SNI! + # other than that we leave this for MacOSX and FreeBSD but it's a pain and likely gives false negatives/positives + esac + ;; + esac + if [[ $OSSL_VER_MAJOR -lt 1 ]]; then ## mm: Patch for libressl + prln_warning " Your \"$OPENSSL\" is way too old (/dev/null; then + PRINTF="" + return 0 + fi + fatal "Neither external printf nor shell internal found. " $ERR_CLUELESS +} + + +help() { + cat << EOF + + "$PROG_NAME [options] " or "$PROG_NAME " + + +"$PROG_NAME ", where is: + + --help what you're looking at + -b, --banner displays banner + version of $PROG_NAME + -v, --version same as previous + -V, --local pretty print all local ciphers + -V, --local which local ciphers with are available? If pattern is not a number: word match + + is always an ignore case word pattern of cipher hexcode or any other string in the name, kx or bits + +"$PROG_NAME ", where is: + + host|host:port|URL|URL:port port 443 is default, URL can only contain HTTPS protocol) + +"$PROG_NAME [options] ", where [options] is: + + -t, --starttls Does a default run against a STARTTLS enabled + --xmpphost For STARTTLS enabled XMPP it supplies the XML stream to-'' domain -- sometimes needed + --mx Tests MX records from high to low priority (STARTTLS, port 25) + --file/-iL Mass testing option: Reads one testssl.sh command line per line from . + Can be combined with --serial or --parallel. Implicitly turns on "--warnings batch". + Text format 1: Comments via # allowed, EOF signals end of + Text format 2: nmap output in greppable format (-oG), 1 port per line allowed + --mode Mass testing to be done serial (default) or parallel (--parallel is shortcut for the latter) + --warnings "batch" doesn't continue when a testing error is encountered, off continues and skips warnings + --connect-timeout useful to avoid hangers. Max to wait for the TCP socket connect to return + --openssl-timeout useful to avoid hangers. 
Max to wait before openssl connect will be terminated + +single check as ("$PROG_NAME URI" does everything except -E and -g): + -e, --each-cipher checks each local cipher remotely + -E, --cipher-per-proto checks those per protocol + -s, --std, --standard tests certain lists of cipher suites by strength + -p, --protocols checks TLS/SSL protocols (including SPDY/HTTP2) + -g, --grease tests several server implementation bugs like GREASE and size limitations + -S, --server-defaults displays the server's default picks and certificate info + -P, --server-preference displays the server's picks: protocol+cipher + -x, --single-cipher tests matched of ciphers + (if not a number: word match) + -c, --client-simulation test client simulations, see which client negotiates with cipher and protocol + -h, --header, --headers tests HSTS, HPKP, server/app banner, security headers, cookie, reverse proxy, IPv4 address + + -U, --vulnerable tests all (of the following) vulnerabilities (if applicable) + -H, --heartbleed tests for Heartbleed vulnerability + -I, --ccs, --ccs-injection tests for CCS injection vulnerability + -T, --ticketbleed tests for Ticketbleed vulnerability in BigIP loadbalancers + -BB, --robot tests for Return of Bleichenbacher's Oracle Threat (ROBOT) vulnerability + -R, --renegotiation tests for renegotiation vulnerabilities + -C, --compression, --crime tests for CRIME vulnerability (TLS compression issue) + -B, --breach tests for BREACH vulnerability (HTTP compression issue) + -O, --poodle tests for POODLE (SSL) vulnerability + -Z, --tls-fallback checks TLS_FALLBACK_SCSV mitigation + -W, --sweet32 tests 64 bit block ciphers (3DES, RC2 and IDEA): SWEET32 vulnerability + -A, --beast tests for BEAST vulnerability + -L, --lucky13 tests for LUCKY13 + -F, --freak tests for FREAK vulnerability + -J, --logjam tests for LOGJAM vulnerability + -D, --drown tests for DROWN vulnerability + -f, --pfs, --fs, --nsa checks (perfect) forward secrecy settings + -4, --rc4, --appelbaum which RC4 ciphers are being offered? + +tuning / connect options (most also can be preset via environment variables): + --fast omits some checks: using openssl for all ciphers (-e), show only first preferred cipher. + -9, --full includes tests for implementation bugs and cipher per protocol (could disappear) + --bugs enables the "-bugs" option of s_client, needed e.g. for some buggy F5s + --assume-http if protocol check fails it assumes HTTP protocol and enforces HTTP checks + --ssl-native fallback to checks with OpenSSL where sockets are normally used + --openssl use this openssl binary (default: look in \$PATH, \$RUN_DIR of $PROG_NAME) + --proxy (experimental) proxy connects via , auto: values from \$env (\$http(s)_proxy) + -6 also use IPv6. Works only with supporting OpenSSL version and IPv6 connectivity + --ip a) tests the supplied v4 or v6 address instead of resolving host(s) in URI + b) arg "one" means: just test the first DNS returns (useful for multiple IPs) + -n, --nodns if "none": do not try any DNS lookups, "min" queries A, AAAA and MX records + --sneaky leave less traces in target logs: user agent, referer + --ids-friendly skips a few vulnerability checks which may cause IDSs to block the scanning IP + --phone-out allow to contact external servers for CRL download and querying OCSP responder + --add-ca path to or a comma separated list of CA files enables test against additional CAs. + --basicauth provide HTTP basic auth information. 
+ +output options (can also be preset via environment variables): + --quiet don't output the banner. By doing this you acknowledge usage terms normally appearing in the banner + --wide wide output for tests like RC4, BEAST. PFS also with hexcode, kx, strength, RFC name + --show-each for wide outputs: display all ciphers tested -- not only succeeded ones + --mapping use the IANA/(RFC) cipher suite name as the primary name cipher suite name form + no-openssl| -> don't display the OpenSSL cipher suite name, display IANA/(RFC) names only + no-iana|no-rfc> -> don't display the IANA/(RFC) cipher suite name, display OpenSSL names only + --color <0|1|2|3> 0: no escape or other codes, 1: b/w escape codes, 2: color (default), 3: extra color (color all ciphers) + --colorblind swap green and blue in the output + --debug <0-6> 1: screen output normal but keeps debug output in /tmp/. 2-6: see "grep -A 5 '^DEBUG=' testssl.sh" + +file output options (can also be preset via environment variables) + --log, --logging logs stdout to '\${NODE}-p\${port}\${YYYYMMDD-HHMM}.log' in current working directory (cwd) + --logfile|-oL logs stdout to 'dir/\${NODE}-p\${port}\${YYYYMMDD-HHMM}.log'. If 'logfile' is a dir or to a specified 'logfile' + --json additional output of findings to flat JSON file '\${NODE}-p\${port}\${YYYYMMDD-HHMM}.json' in cwd + --jsonfile|-oj additional output to the specified flat JSON file or directory, similar to --logfile + --json-pretty additional JSON structured output of findings to a file '\${NODE}-p\${port}\${YYYYMMDD-HHMM}.json' in cwd + --jsonfile-pretty|-oJ additional JSON structured output to the specified file or directory, similar to --logfile + --csv additional output of findings to CSV file '\${NODE}-p\${port}\${YYYYMMDD-HHMM}.csv' in cwd or directory + --csvfile|-oC additional output as CSV to the specified file or directory, similar to --logfile + --html additional output as HTML to file '\${NODE}-p\${port}\${YYYYMMDD-HHMM}.html' + --htmlfile|-oH additional output as HTML to the specified file or directory, similar to --logfile + --out(f,F)ile|-oa/-oA log to a LOG,JSON,CSV,HTML file (see nmap). -oA/-oa: pretty/flat JSON. + "auto" uses '\${NODE}-p\${port}\${YYYYMMDD-HHMM}'. If fname if a dir uses 'dir/\${NODE}-p\${port}\${YYYYMMDD-HHMM}' + --hints additional hints to findings + --severity severities with lower level will be filtered for CSV+JSON, possible values + --append if (non-empty) , , or exists, append to file. Omits any header + --outprefix before '\${NODE}.' above prepend + + +Options requiring a value can also be called with '=' e.g. testssl.sh -t=smtp --wide --openssl=/usr/bin/openssl . + always needs to be the last parameter. + +EOF + # Set HTMLHEADER and JSONHEADER to false so that the cleanup() function won't + # try to write footers to the HTML and JSON files. + HTMLHEADER=false + JSONHEADER=false + #' Fix syntax highlight on sublime + "$CHILD_MASS_TESTING" && kill -s USR1 $PPID + exit $1 +} + +maketempf() { + TEMPDIR=$(mktemp -d /tmp/testssl.XXXXXX) + if [[ $? -ne 0 ]]; then + # For e.g. 
devices where we can't write to /tmp we chose $PWD but we can't + # allow every char as we haven't quoted all strings depending on it, see #1445 + if [[ $PWD =~ [^A-Za-z0-9\.,/_-] ]]; then + fatal "\$PWD contains illegal chars: \"$BASH_REMATCH\"" $ERR_FCREATE + fi + TEMPDIR=$(mktemp -d "$PWD/testssl.XXXXXX") || exit $ERR_FCREATE + fi + TMPFILE=$TEMPDIR/tempfile.txt || exit $ERR_FCREATE + if [[ "$DEBUG" -eq 0 ]]; then + ERRFILE="/dev/null" + else + ERRFILE=$TEMPDIR/errorfile.txt || exit $ERR_FCREATE + fi + HOSTCERT=$TEMPDIR/host_certificate.pem +} + +prepare_debug() { + if [[ $DEBUG -ne 0 ]]; then + cat >$TEMPDIR/environment.txt << EOF + + +CVS_REL: $CVS_REL +GIT_REL: $GIT_REL + +PID: $$ +commandline: "$CMDLINE" +bash version: ${BASH_VERSINFO[0]}.${BASH_VERSINFO[1]}.${BASH_VERSINFO[2]} +status: ${BASH_VERSINFO[4]} +machine: ${BASH_VERSINFO[5]} +operating system: $SYSTEM $SYSTEMREV +os constraint: $SYSTEM2 +shellopts: $SHELLOPTS +printf: $PRINTF +NO_ITALICS: $NO_ITALICS + +$($OPENSSL version -a 2>/dev/null) +OSSL_VER_MAJOR: $OSSL_VER_MAJOR +OSSL_VER_MINOR: $OSSL_VER_MINOR +OSSL_VER_APPENDIX: $OSSL_VER_APPENDIX +OSSL_BUILD_DATE: $OSSL_BUILD_DATE +OSSL_VER_PLATFORM: $OSSL_VER_PLATFORM + +OPENSSL_NR_CIPHERS: $OPENSSL_NR_CIPHERS +OPENSSL_CONF: $OPENSSL_CONF +HAS_CURVES: $HAS_CURVES +OSSL_SUPPORTED_CURVES: $OSSL_SUPPORTED_CURVES + +HAS_IPv6: $HAS_IPv6 +HAS_SSL2: $HAS_SSL2 +HAS_SSL3: $HAS_SSL3 +HAS_TLS13: $HAS_TLS13 +HAS_X448: $HAS_X448 +HAS_X25519: $HAS_X25519 +HAS_NO_SSL2: $HAS_NO_SSL2 +HAS_SPDY: $HAS_SPDY +HAS_ALPN: $HAS_ALPN +HAS_FALLBACK_SCSV: $HAS_FALLBACK_SCSV +HAS_COMP: $HAS_COMP +HAS_NO_COMP: $HAS_NO_COMP +HAS_CIPHERSUITES: $HAS_CIPHERSUITES +HAS_PKEY: $HAS_PKEY +HAS_PKUTIL: $HAS_PKUTIL +HAS_PROXY: $HAS_PROXY +HAS_XMPP: $HAS_XMPP +HAS_POSTGRES: $HAS_POSTGRES +HAS_MYSQL: $HAS_MYSQL +HAS_LMTP: $HAS_LMTP +HAS_NNTP: $HAS_NNTP +HAS_IRC: $HAS_IRC + +HAS_DIG: $HAS_DIG +HAS_HOST: $HAS_HOST +HAS_DRILL: $HAS_DRILL +HAS_NSLOOKUP: $HAS_NSLOOKUP +HAS_IDN: $HAS_IDN +HAS_IDN2: $HAS_IDN2 +HAS_AVAHIRESOLVE: $HAS_AVAHIRESOLVE +HAS_DIG_NOIDNOUT: $HAS_DIG_NOIDNOUT + +PATH: $PATH +PROG_NAME: $PROG_NAME +TESTSSL_INSTALL_DIR: $TESTSSL_INSTALL_DIR +RUN_DIR: $RUN_DIR +CIPHERS_BY_STRENGTH_FILE: $CIPHERS_BY_STRENGTH_FILE + +CAPATH: $CAPATH +COLOR: $COLOR +COLORBLIND: $COLORBLIND +TERM_WIDTH: $TERM_WIDTH +INTERACTIVE: $INTERACTIVE +HAS_GNUDATE: $HAS_GNUDATE +HAS_FREEBSDDATE: $HAS_FREEBSDDATE +HAS_OPENBSDDATE: $HAS_OPENBSDDATE +HAS_SED_E: $HAS_SED_E + +SHOW_EACH_C: $SHOW_EACH_C +SSL_NATIVE: $SSL_NATIVE +ASSUME_HTTP $ASSUME_HTTP +BASICAUTH: $BASICAUTH +SNEAKY: $SNEAKY +OFFENSIVE: $OFFENSIVE +PHONE_OUT: $PHONE_OUT + +DEBUG: $DEBUG + +HSTS_MIN: $HSTS_MIN +HPKP_MIN: $HPKP_MIN +CLIENT_MIN_PFS: $CLIENT_MIN_PFS +DAYS2WARN1: $DAYS2WARN1 +DAYS2WARN2: $DAYS2WARN2 + +HEADER_MAXSLEEP: $HEADER_MAXSLEEP +MAX_WAITSOCK: $MAX_WAITSOCK +HEARTBLEED_MAX_WAITSOCK: $HEARTBLEED_MAX_WAITSOCK +CCS_MAX_WAITSOCK: $CCS_MAX_WAITSOCK +USLEEP_SND $USLEEP_SND +USLEEP_REC $USLEEP_REC + +EOF + type -p locale &>/dev/null && locale >>$TEMPDIR/environment.txt || echo "locale doesn't exist" >>$TEMPDIR/environment.txt + actually_supported_osslciphers 'ALL:COMPLEMENTOFALL' 'ALL' "-V" &>$TEMPDIR/all_local_ciphers.txt + fi + # see also $TEMPDIR/s_client_has.txt from find_openssl_binary +} + + +prepare_arrays() { + local hexc mac ossl_ciph + local ossl_supported_tls="" ossl_supported_sslv2="" + local -i i=0 + + if [[ -e "$CIPHERS_BY_STRENGTH_FILE" ]]; then + "$HAS_SSL2" && ossl_supported_sslv2="$($OPENSSL ciphers -ssl2 -V 'ALL:COMPLEMENTOFALL:@STRENGTH' 
2>$ERRFILE)" + if "$HAS_SSL2"; then + ossl_supported_tls="$(actually_supported_osslciphers 'ALL:COMPLEMENTOFALL:@STRENGTH' 'ALL' "-tls1 -V")" + else + ossl_supported_tls="$(actually_supported_osslciphers 'ALL:COMPLEMENTOFALL:@STRENGTH' 'ALL' "-V")" + fi + TLS13_OSSL_CIPHERS="" + while read hexc n TLS_CIPHER_OSSL_NAME[i] TLS_CIPHER_RFC_NAME[i] TLS_CIPHER_SSLVERS[i] TLS_CIPHER_KX[i] TLS_CIPHER_AUTH[i] TLS_CIPHER_ENC[i] mac TLS_CIPHER_EXPORT[i]; do + TLS_CIPHER_HEXCODE[i]="$hexc" + TLS_CIPHER_OSSL_SUPPORTED[i]=false + if [[ ${#hexc} -eq 9 ]]; then + # >= SSLv3 ciphers + if [[ $OSSL_VER_MAJOR -lt 1 ]]; then + [[ ":${ossl_supported_tls}:" =~ ":${TLS_CIPHER_OSSL_NAME[i]}:" ]] && TLS_CIPHER_OSSL_SUPPORTED[i]=true + else + ossl_ciph="$(awk '/'"$hexc"'/ { print $3 }' <<< "$ossl_supported_tls")" + if [[ -n "$ossl_ciph" ]]; then + TLS_CIPHER_OSSL_SUPPORTED[i]=true + [[ "$ossl_ciph" != ${TLS_CIPHER_OSSL_NAME[i]} ]] && TLS_CIPHER_OSSL_NAME[i]="$ossl_ciph" + [[ "${hexc:2:2}" == 13 ]] && TLS13_OSSL_CIPHERS+=":$ossl_ciph" + fi + fi + elif [[ $OSSL_VER_MAJOR -lt 1 ]]; then + [[ ":${ossl_supported_sslv2}:" =~ ":${TLS_CIPHER_OSSL_NAME[i]}:" ]] && TLS_CIPHER_OSSL_SUPPORTED[i]=true + else + [[ "$ossl_supported_sslv2" =~ $hexc ]] && TLS_CIPHER_OSSL_SUPPORTED[i]=true + fi + i+=1 + done < "$CIPHERS_BY_STRENGTH_FILE" + fi + TLS_NR_CIPHERS=i + TLS13_OSSL_CIPHERS="${TLS13_OSSL_CIPHERS:1}" +} + + +mybanner() { + local idtag + local bb1 bb2 bb3 + + "$QUIET" && return + "$CHILD_MASS_TESTING" && return + OPENSSL_NR_CIPHERS=$(count_ciphers "$(actually_supported_osslciphers 'ALL:COMPLEMENTOFALL:@STRENGTH' 'ALL')") + [[ -z "$GIT_REL" ]] && \ + idtag="$CVS_REL" || \ + idtag="$GIT_REL -- $CVS_REL_SHORT" + bb1=$(cat </dev/null)\" [~$OPENSSL_NR_CIPHERS ciphers]" + out " on $HNAME:" + outln "$OPENSSL_LOCATION" + outln " (built: \"$OSSL_BUILD_DATE\", platform: \"$OSSL_VER_PLATFORM\")\n" +} + +calc_scantime() { + END_TIME=$(date +%s) + SCAN_TIME=$(( END_TIME - START_TIME )) +} + +cleanup() { + # If parallel mass testing is being performed, then the child tests need + # to be killed before $TEMPDIR is deleted. Otherwise, error messages + # will be created if testssl.sh is stopped before all testing is complete. + "$INTERACTIVE" && [[ $NR_PARALLEL_TESTS -gt 0 ]] && echo -en "\r \r" 1>&2 + while [[ $NEXT_PARALLEL_TEST_TO_FINISH -lt $NR_PARALLEL_TESTS ]]; do + if [[ ${PARALLEL_TESTING_PID[NEXT_PARALLEL_TEST_TO_FINISH]} -ne 0 ]] && \ + ps ${PARALLEL_TESTING_PID[NEXT_PARALLEL_TEST_TO_FINISH]} >/dev/null ; then + kill ${PARALLEL_TESTING_PID[NEXT_PARALLEL_TEST_TO_FINISH]} >&2 2>/dev/null + wait ${PARALLEL_TESTING_PID[NEXT_PARALLEL_TEST_TO_FINISH]} 2>/dev/null # make sure pid terminated, see wait(1p) + get_next_message_testing_parallel_result "stopped" + else + # If a test had already completed, but its output wasn't yet processed, + # then process it now. + get_next_message_testing_parallel_result "completed" + fi + NEXT_PARALLEL_TEST_TO_FINISH+=1 + done + if [[ "$DEBUG" -ge 1 ]]; then + tmln_out + tm_underline "DEBUG (level $DEBUG): see files in $TEMPDIR" + tmln_out + else + [[ -d "$TEMPDIR" ]] && rm -rf "$TEMPDIR"; + fi + outln + # No shorthand expression to avoid errors when $CMDLINE_PARSED haven't been filled yet. + if [[ $CMDLINE_PARSED == true ]]; then + "$SECTION_FOOTER_NEEDED" && fileout_section_footer true + html_footer + fileout_footer + fi + # debugging off, see above + grep -q xtrace <<< "$SHELLOPTS" && ! 
"$DEBUG_ALLINONE" && exec 2>&42 42>&- +} + +child_error() { + cleanup + exit $ERR_CHILD +} + + +# Program terminates prematurely, with error code +# arg1: string to print / to write to file +# arg2: global error code, see ERR_* above +# arg3: an optional hint (string) +# +fatal() { + outln + prln_magenta "Fatal error: $1" >&2 + [[ -n "$LOGFILE" ]] && prln_magenta "Fatal error: $1" >>$LOGFILE + if [[ -n "$3" ]]; then + outln "$3" >&2 + [[ -n "$LOGFILE" ]] && outln "$3" >>$LOGFILE + fi + # Make sure we don't try to write into files when not created yet. + # No shorthand expression to avoid errors when $CMDLINE_PARSED haven't been filled yet. + [[ $CMDLINE_PARSED == true ]] && fileout "scanProblem" "FATAL" "$1" + exit $2 +} + +# This OTOH doesn't exit but puts a fatal error to the screen but continues with the next +# IP/hostname. It should only be used if a single IP/Hostname in a scan is not reachable. +# arg1: string to print / to write to file +# +ip_fatal() { + outln + prln_magenta "Fatal error: $1, proceeding with next IP (if any)" >&2 + [[ -n "$LOGFILE" ]] && prln_magenta "Fatal error: $1, proceeding with next IP (if any)" >>$LOGFILE + outln + fileout "scanProblem" "FATAL" "$1, proceeding with next IP (if any)" + return 0 +} + +# This generic function outputs an error onto the screen and handles logging. +# arg1: string to print / to write to file, arg2 (optional): additional hint to write +# +generic_nonfatal() { + prln_magenta "$1" >&2 + [[ -n $2 ]] && outln "$2" + [[ -n "$LOGFILE" ]] && prln_magenta "$1" >>$LOGFILE && [[ -n $2 ]] && outln "$2" >>$LOGFILE + outln + fileout "scanProblem" "WARN" "$1" + return 0 +} + +initialize_engine(){ + # for now only GOST engine + grep -q '^# testssl config file' "$OPENSSL_CONF" 2>/dev/null && \ + return 0 # We have been here already + if "$NO_ENGINE"; then + # Avoid potential conflicts also -- manual hook, see #1117 + export OPENSSL_CONF='' + return 1 + elif $OPENSSL engine gost -v 2>&1 | grep -Eq 'invalid command|no such engine'; then + outln + pr_warning "No engine or GOST support via engine with your $OPENSSL"; outln + fileout_insert_warning "engine_problem" "WARN" "No engine or GOST support via engine with your $OPENSSL" + export OPENSSL_CONF='' + return 1 + elif ! $OPENSSL engine gost -vvvv -t -c 2>/dev/null >/dev/null; then + # check for openssl 1.1.1 config -- not this may not be reliable. We only use this + # to suppress the warning (confuses users), see #1119 + # https://github.com/openssl/openssl/commit/b524b808a1d1ba204dbdcbb42de4e3bddb3472ac + if ! grep -q 'using the .include directive' /etc/ssl/openssl.cnf; then + outln + pr_warning "No engine or GOST support via engine with your $OPENSSL"; outln + fi + fileout_insert_warning "engine_problem" "WARN" "No engine or GOST support via engine with your $OPENSSL" + # Avoid clashes of OpenSSL 1.1.1 config file with our openssl 1.0.2. This is for Debian 10 + export OPENSSL_CONF='' + return 1 + else # we have engine support + if [[ -n "$OPENSSL_CONF" ]]; then + prln_warning "For now I am providing the config file to have GOST support" + else + OPENSSL_CONF=$TEMPDIR/gost.conf + # see https://www.mail-archive.com/openssl-users@openssl.org/msg65395.html + cat >$OPENSSL_CONF << EOF +# testssl config file for openssl + +openssl_conf = openssl_def + +[ openssl_def ] +engines = engine_section + +[ engine_section ] +gost = gost_section + +[ gost_section ] +engine_id = gost +default_algorithms = ALL +CRYPT_PARAMS = id-Gost28147-89-CryptoPro-A-ParamSet + +EOF + [[ $? 
-ne 0 ]] && exit $ERR_OSSLBIN + export OPENSSL_CONF + fi + fi + return 0 +} + +# arg1: text to display before "-->" +# arg2: arg needed to accept to continue +ignore_no_or_lame() { + local a + + [[ "$WARNINGS" == off ]] && return 0 + [[ "$WARNINGS" == batch ]] && return 1 + tm_warning "$1 --> " + read a + if [[ "$2" == "$(toupper "$2")" ]]; then + # all uppercase requested + if [[ "$a" == "$2" ]]; then + return 0 + else + return 1 + fi + elif [[ "$2" == "$(tolower "$a")" ]]; then + # we normalize the word to continue + return 0 + else + return 1 + fi +} + +# arg1: URI +parse_hn_port() { + local tmp_port + local node_tmp="" + + NODE="$1" + NODE="${NODE/https\:\/\//}" # strip "https" + NODE="${NODE%%/*}" # strip trailing urlpath + NODE="${NODE%%.}" # strip trailing "." if supplied + if grep -q ':$' <<< "$NODE"; then + if grep -wq http <<< "$NODE"; then + fatal "\"http\" is not what you meant probably" $ERR_CMDLINE + else + fatal "\"$1\" is not a valid URI" $ERR_CMDLINE + fi + fi + # Was an IPv6 address supplied like [AA:BB:CC::]:port ? + if grep -q ']' <<< "$NODE"; then + tmp_port=$(printf "$NODE" | sed 's/\[.*\]//' | sed 's/://') + # determine v6 port, supposed it was supplied additionally + if [[ -n "$tmp_port" ]]; then + PORT=$tmp_port + NODE=$(sed "s/:$PORT//" <<< "$NODE") + fi + NODE=$(sed -e 's/\[//' -e 's/\]//' <<< "$NODE") + else + # determine v4 port, supposed it was supplied additionally + grep -q ':' <<< "$NODE" && \ + PORT=$(sed 's/^.*\://' <<< "$NODE") && NODE=$(sed 's/\:.*$//' <<< "$NODE") + fi + + # We check for non-ASCII chars now. If there are some we'll try to convert it if IDN/IDN2 is installed + # If not, we'll continue. Hoping later that dig can use it. If not the error handler will tell + # Honestly we don't care whether it's IDN2008 or IDN2003 or Emoji domains as long as it works. + # So we try to resolve anything supplied. If it can't our resolver error handler takes care + if [[ "$NODE" == *[![:ascii:]]* ]]; then + if ! "$HAS_IDN2" && ! "$HAS_IDN"; then + prln_warning " URI contains non-ASCII characters and libidn/libidn2 not available." + outln " Trying to feed the resolver without converted \"$NODE\" ...\n" + #ToDo: fileout is missing + node_tmp="$NODE" + elif "$HAS_IDN2"; then + node_tmp="$(idn2 "$NODE" 2>/dev/null)" + fi + if "$HAS_IDN" && [[ -z "$node_tmp" ]]; then + node_tmp="$(idn "$NODE" 2>/dev/null)" + fi + if [[ -z "$node_tmp" ]]; then + prln_warning " URI contains non-ASCII characters and IDN conversion failed." + outln " Trying to feed the resolver without converted \"$NODE\" ...\n" + #ToDo: fileout is missing + node_tmp="$NODE" + fi + NODE="$node_tmp" + fi + + debugme echo $NODE:$PORT + SNI="-servername $NODE" + URL_PATH=$(sed 's/https:\/\///' <<< "$1" | sed 's/'"${NODE}"'//' | sed 's/.*'"${PORT}"'//') # remove protocol and node part and port + URL_PATH=$(sed 's/\/\//\//g' <<< "$URL_PATH") # we rather want // -> / + URL_PATH=${URL_PATH%%.} # strip trailing "." so that it is not interpreted as URL + [[ -z "$URL_PATH" ]] && URL_PATH="/" + debugme echo "URL_PATH: $URL_PATH" + return 0 # NODE, URL_PATH, PORT is set now +} + + +# args: string containing ip addresses +filter_ip6_address() { + local a + + for a in "$@"; do + if ! is_ipv6addr "$a"; then + continue + fi + if "$HAS_SED_E"; then + sed -E 's/^abcdeABCDEFf0123456789:]//g' <<< "$a" | sed -e '/^$/d' -e '/^;;/d' + else + sed -r 's/[^abcdefABCDEF0123456789:]//g' <<< "$a" | sed -e '/^$/d' -e '/^;;/d' + fi + done +} + +filter_ip4_address() { + local a + + for a in "$@"; do + if ! 
is_ipv4addr "$a"; then + continue + fi + if "$HAS_SED_E"; then + sed -E 's/[^[:digit:].]//g' <<< "$a" | sed -e '/^$/d' + else + sed -r 's/[^[:digit:].]//g' <<< "$a" | sed -e '/^$/d' + fi + done +} + +# For security testing sometimes we have local entries. Getent is BS under Linux for localhost: No network, no resolution +# arg1 is the entry we want to look up in the host file +get_local_aaaa() { + local ip6="" + local etchosts="/etc/hosts /c/Windows/System32/drivers/etc/hosts" + + [[ -z "$1" ]] && echo "" && return 1 + # Also multiple records should work fine + ip6=$(grep -wih "$1" $etchosts 2>/dev/null | grep ':' | grep -Ev '^#|\.local' | grep -Ei "[[:space:]]$1" | awk '{ print $1 }') + if is_ipv6addr "$ip6"; then + echo "$ip6" + else + echo "" + fi +} +get_local_a() { + local ip4="" + local etchosts="/etc/hosts /c/Windows/System32/drivers/etc/hosts" + + ip4=$(grep -wih "$1" $etchosts 2>/dev/null | grep -Ev ':|^#|\.local' | grep -Ei "[[:space:]]$1" | awk '{ print $1 }') + if is_ipv4addr "$ip4"; then + echo "$ip4" + else + echo "" + fi +} + +# Does a hard exit if no lookup binary is provided +# Checks for IDN capabilities also +# +check_resolver_bins() { + local saved_openssl_conf="$OPENSSL_CONF" + + type -p dig &> /dev/null && HAS_DIG=true + type -p host &> /dev/null && HAS_HOST=true + type -p drill &> /dev/null && HAS_DRILL=true + type -p nslookup &> /dev/null && HAS_NSLOOKUP=true + type -p avahi-resolve &>/dev/null && HAS_AVAHIRESOLVE=true + type -p idn &>/dev/null && HAS_IDN=true + type -p idn2 &>/dev/null && HAS_IDN2=true + + OPENSSL_CONF="" # see https://github.com/drwetter/testssl.sh/issues/134 + if ! "$HAS_DIG" && ! "$HAS_HOST" && ! "$HAS_DRILL" && ! "$HAS_NSLOOKUP"; then + fatal "Neither \"dig\", \"host\", \"drill\" or \"nslookup\" is present" $ERR_DNSBIN + fi + if "$HAS_DIG"; then + if dig +noidnout -t a 2>&1 | grep -Eq 'Invalid option: \+noidnout|IDN support not enabled'; then + : + else + HAS_DIG_NOIDNOUT=true + fi + fi + OPENSSL_CONF="$saved_openssl_conf" # see https://github.com/drwetter/testssl.sh/issues/134 + return 0 +} + +# arg1: a host name. Returned will be 0-n IPv4 addresses +# watch out: $1 can also be a cname! --> all checked +get_a_record() { + local ip4="" + local saved_openssl_conf="$OPENSSL_CONF" + local noidnout="" + + "$HAS_DIG_NOIDNOUT" && noidnout="+noidnout" + [[ "$NODNS" == none ]] && return 0 # if no DNS lookup was instructed, leave here + if [[ "$1" == localhost ]]; then + # This is a bit ugly but prevents from doing DNS lookups which could fail + echo 127.0.0.1 + return 0 + fi + if is_ipv4addr "$1"; then + # This saves walking through this. Also it avoids hangs e.g. if you run docker locally without reachabale DNS + echo $1 + return 0 + fi + OPENSSL_CONF="" # see https://github.com/drwetter/testssl.sh/issues/134 + if [[ "$NODE" == *.local ]]; then + if "$HAS_AVAHIRESOLVE"; then + ip4=$(filter_ip4_address $(avahi-resolve -4 -n "$1" 2>/dev/null | awk '{ print $2 }')) + elif "$HAS_DIG"; then + ip4=$(filter_ip4_address $(dig @224.0.0.251 -p 5353 +short -t a +notcp "$1" 2>/dev/null | sed '/^;;/d')) + else + fatal "Local hostname given but no 'avahi-resolve' or 'dig' available." 
$ERR_DNSBIN + fi + fi + if [[ -z "$ip4" ]] && "$HAS_DIG"; then + ip4=$(filter_ip4_address $(dig +short +timeout=2 +tries=2 $noidnout -t a "$1" 2>/dev/null | awk '/^[0-9]/ { print $1 }')) + fi + if [[ -z "$ip4" ]] && "$HAS_HOST"; then + ip4=$(filter_ip4_address $(host -t a "$1" 2>/dev/null | awk '/address/ { print $NF }')) + fi + if [[ -z "$ip4" ]] && "$HAS_DRILL"; then + ip4=$(filter_ip4_address $(drill a "$1" | awk '/ANSWER SECTION/,/AUTHORITY SECTION/ { print $NF }' | awk '/^[0-9]/')) + fi + if [[ -z "$ip4" ]] && "$HAS_NSLOOKUP"; then + ip4=$(filter_ip4_address $(strip_lf "$(nslookup -querytype=a "$1" 2>/dev/null | awk '/^Name/ { getline; print $NF }')")) + fi + OPENSSL_CONF="$saved_openssl_conf" # see https://github.com/drwetter/testssl.sh/issues/134 + echo "$ip4" +} + +# arg1: a host name. Returned will be 0-n IPv6 addresses +# watch out: $1 can also be a cname! --> all checked +get_aaaa_record() { + local ip6="" + local saved_openssl_conf="$OPENSSL_CONF" + local noidnout="" + + "$HAS_DIG_NOIDNOUT" && noidnout="+noidnout" + [[ "$NODNS" == none ]] && return 0 # if no DNS lookup was instructed, leave here + OPENSSL_CONF="" # see https://github.com/drwetter/testssl.sh/issues/134 + if is_ipv6addr "$1"; then + # This saves walking through this. Also it avoids hangs e.g. if you run docker locally without reachabale DNS + echo "$1" + return 0 + elif is_ipv4addr "$1"; then + # we need also this here as get_aaaa_record is always called after get_a_record and we want to handle this at a low level + return 0 + fi + if [[ -z "$ip6" ]]; then + if [[ "$NODE" == *.local ]]; then + if "$HAS_AVAHIRESOLVE"; then + ip6=$(filter_ip6_address $(avahi-resolve -6 -n "$1" 2>/dev/null | awk '{ print $2 }')) + elif "$HAS_DIG"; then + ip6=$(filter_ip6_address $(dig @ff02::fb -p 5353 -t aaaa +short +notcp "$NODE")) + else + fatal "Local hostname given but no 'avahi-resolve' or 'dig' available." 
$ERR_DNSBIN + fi + elif "$HAS_DIG"; then + ip6=$(filter_ip6_address $(dig +short +timeout=2 +tries=2 $noidnout -t aaaa "$1" 2>/dev/null | awk '/^[0-9]/ { print $1 }')) + elif "$HAS_HOST"; then + ip6=$(filter_ip6_address $(host -t aaaa "$1" | awk '/address/ { print $NF }')) + elif "$HAS_DRILL"; then + ip6=$(filter_ip6_address $(drill aaaa "$1" | awk '/ANSWER SECTION/,/AUTHORITY SECTION/ { print $NF }' | awk '/^[0-9]/')) + elif "$HAS_NSLOOKUP"; then + ip6=$(filter_ip6_address $(strip_lf "$(nslookup -type=aaaa "$1" 2>/dev/null | awk '/'"^${a}"'.*AAAA/ { print $NF }')")) + fi + fi + OPENSSL_CONF="$saved_openssl_conf" # see https://github.com/drwetter/testssl.sh/issues/134 + echo "$ip6" +} + +# RFC6844: DNS Certification Authority Authorization (CAA) Resource Record +# arg1: domain to check for +get_caa_rr_record() { + local raw_caa="" + local -i len_caa_property + local caa_property_name + local caa_property_value + local saved_openssl_conf="$OPENSSL_CONF" + local all_caa="" + local noidnout="" + + "$HAS_DIG_NOIDNOUT" && noidnout="+noidnout" + + [[ -n "$NODNS" ]] && return 0 # if minimum DNS lookup was instructed, leave here + # if there's a type257 record there are two output formats here, mostly depending on age of distribution + # roughly that's the difference between text and binary format + # 1) 'google.com has CAA record 0 issue "symantec.com"' + # 2) 'google.com has TYPE257 record \# 19 0005697373756573796D616E7465632E636F6D' + # for dig +short the output always starts with '0 issue [..]' or '\# 19 [..]' so we normalize thereto to keep caa_flag, caa_property + # caa_property then has key/value pairs, see https://tools.ietf.org/html/rfc6844#section-3 + OPENSSL_CONF="" + if "$HAS_DIG"; then + raw_caa="$(dig +short +timeout=3 +tries=3 $noidnout type257 "$1" 2>/dev/null | awk '{ print $1" "$2" "$3 }')" + # empty if no CAA record + elif "$HAS_DRILL"; then + raw_caa="$(drill $1 type257 | awk '/'"^${1}"'.*CAA/ { print $5,$6,$7 }')" + elif "$HAS_HOST"; then + raw_caa="$(host -t type257 $1)" + if grep -Ewvq "has no CAA|has no TYPE257" <<< "$raw_caa"; then + raw_caa="$(sed -e 's/^.*has CAA record //' -e 's/^.*has TYPE257 record //' <<< "$raw_caa")" + fi + elif "$HAS_NSLOOKUP"; then + raw_caa="$(strip_lf "$(nslookup -type=type257 $1 | grep -w rdata_257)")" + if [[ -n "$raw_caa" ]]; then + raw_caa="$(sed 's/^.*rdata_257 = //' <<< "$raw_caa")" + fi + else + return 1 + # No dig, drill, host, or nslookup --> complaint was elsewhere already + fi + OPENSSL_CONF="$saved_openssl_conf" # see https://github.com/drwetter/testssl.sh/issues/134 + debugme echo $raw_caa + + if [[ "$raw_caa" =~ \#\ [0-9][0-9] ]]; then + # for posteo we get this binary format returned e.g. for old dig versions: + # \# 19 0005697373756567656F74727573742E636F6D + # \# 23 0009697373756577696C6467656F74727573742E636F6D + # \# 34 0005696F6465666D61696C746F3A686F73746D617374657240706F73 74656F2E6465 + # # len caaflag @ p o s t e o . d e + while read hash len line ;do + if [[ "${line:0:2}" == "00" ]]; then # probably the caa flag, always 00, so we don't keep this + len_caa_property=$(printf "%0d" "$((10#${line:2:2}))") # get len and do type casting, for posteo we have 05 or 09 here as a string + len_caa_property=$((len_caa_property*2)) # =>word! Now get name from 4th and value from 4th+len position... 
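+                        # Worked example, first sample record above (0005697373756567656F74727573742E636F6D):
+                        # caa flag = 00, len = 05 -> 10 hex chars; name = hex2ascii("6973737565") = "issue",
+                        # value = hex2ascii("67656F74727573742E636F6D") = "geotrust.com", i.e. "issue=geotrust.com"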
+ line="${line/ /}" # especially with iodefs there's a blank in the string which we just skip + caa_property_name="$(hex2ascii ${line:4:$len_caa_property})" + caa_property_value="$(hex2ascii "${line:$((4+len_caa_property)):100}")" + # echo "${caa_property_name}=${caa_property_value}" + all_caa+="${caa_property_name}=${caa_property_value}\n" + else + outln "please report unknown CAA RR $line with flag @ $NODE" + return 7 + fi + done <<< "$raw_caa" + sort <<< "$(safe_echo "$all_caa")" + return 0 + elif grep -q '"' <<< "$raw_caa"; then + raw_caa=${raw_caa//\"/} # strip all ". Now we should have flag, name, value + #caa_property_name="$(awk '{ print $2 }' <<< "$raw_caa")" + #caa_property_value="$(awk '{ print $3 }' <<< "$raw_caa")" + safe_echo "$(sort <<< "$(awk '{ print $2"="$3 }' <<< "$raw_caa")")" + return 0 + else + # no caa record + return 1 + fi + +# to do: +# 4: check whether $1 is a CNAME and take this + return 0 +} + +# watch out: $1 can also be a cname! --> all checked +get_mx_record() { + local mx="" + local saved_openssl_conf="$OPENSSL_CONF" + local noidnout="" + + "$HAS_DIG_NOIDNOUT" && noidnout="+noidnout" + OPENSSL_CONF="" # see https://github.com/drwetter/testssl.sh/issues/134 + # we need the last two columns here + if "$HAS_HOST"; then + mxs="$(host -t MX "$1" 2>/dev/null | awk '/is handled by/ { print $(NF-1), $NF }')" + elif "$HAS_DIG"; then + mxs="$(dig +short $noidnout -t MX "$1" 2>/dev/null | awk '/^[0-9]/ { print $1" "$2 }')" + elif "$HAS_DRILL"; then + mxs="$(drill mx $1 | awk '/IN[ \t]MX[ \t]+/ { print $(NF-1), $NF }')" + elif "$HAS_NSLOOKUP"; then + mxs="$(strip_lf "$(nslookup -type=MX "$1" 2>/dev/null | awk '/mail exchanger/ { print $(NF-1), $NF }')")" + else + # shouldn't reach this, as we checked in the top + fatal "No dig, host, drill or nslookup" $ERR_DNSBIN + fi + OPENSSL_CONF="$saved_openssl_conf" + echo "$mxs" +} + + +# set IPADDRs and IP46ADDRs +# +determine_ip_addresses() { + local ip4="" + local ip6="" + + ip4="$(get_a_record "$NODE")" + ip6="$(get_aaaa_record "$NODE")" + IP46ADDRs=$(newline_to_spaces "$ip4 $ip6") + + if [[ -n "$CMDLINE_IP" ]]; then + # command line has supplied an IP address or "one" + if [[ "$CMDLINE_IP" == one ]]; then + # use first IPv6 or IPv4 address + if "$HAS_IPv6" && [[ -n "$ip6" ]]; then + CMDLINE_IP="$(head -1 <<< "$ip6")" + else + CMDLINE_IP="$(head -1 <<< "$ip4")" + fi + fi + NODEIP="$CMDLINE_IP" + if is_ipv4addr "$NODEIP"; then + ip4="$NODEIP" + elif is_ipv6addr "$NODEIP"; then + ip6="$NODEIP" + else + fatal "couldn't identify supplied \"CMDLINE_IP\"" $ERR_DNSLOOKUP + fi + elif is_ipv4addr "$NODE"; then + ip4="$NODE" # only an IPv4 address was supplied as an argument, no hostname + SNI="" # override Server Name Indication as we test the IP only + else + ip4=$(get_local_a "$NODE") # is there a local host entry? 
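+          # Hosts-file entries win over DNS: only query DNS when no local entry exists,
+          # and set LOCAL_A / LOCAL_AAAA so the rest of the script knows the address
+          # did not come from a DNS lookup.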
+ if [[ -z "$ip4" ]]; then # empty: no (LOCAL_A is predefined as false) + ip4=$(get_a_record "$NODE") + else + LOCAL_A=true # we have the ip4 from local host entry and need to signal this to testssl + fi + # same now for ipv6 + ip6=$(get_local_aaaa "$NODE") + if [[ -z "$ip6" ]]; then + ip6=$(get_aaaa_record "$NODE") + else + LOCAL_AAAA=true # we have a local ipv6 entry and need to signal this to testssl + fi + fi + + # IPv6 only address + if [[ -z "$ip4" ]]; then + if "$HAS_IPv6"; then + IPADDRs=$(newline_to_spaces "$ip6") + IP46ADDRs="$IPADDRs" # IP46ADDRs are the ones to display, IPADDRs the ones to test + fi + else + if "$HAS_IPv6" && [[ -n "$ip6" ]]; then + if is_ipv6addr "$CMDLINE_IP"; then + IPADDRs=$(newline_to_spaces "$ip6") + else + IPADDRs=$(newline_to_spaces "$ip4 $ip6") + fi + else + IPADDRs=$(newline_to_spaces "$ip4") + fi + fi + if [[ -z "$IPADDRs" ]]; then + if [[ -n "$ip6" ]]; then + fatal "Only IPv6 address(es) for \"$NODE\" available, maybe add \"-6\" to $0" $ERR_DNSLOOKUP + else + fatal "No IPv4/IPv6 address(es) for \"$NODE\" available" $ERR_DNSLOOKUP + fi + fi + return 0 # IPADDR and IP46ADDR is set now +} + +determine_rdns() { + local saved_openssl_conf="$OPENSSL_CONF" + local nodeip="" rdns="" line="" + + [[ -n "$NODNS" ]] && rDNS="(instructed to minimize DNS queries)" && return 0 # PTR records were not asked for + local nodeip="$(tr -d '[]' <<< $NODEIP)" # for DNS we do not need the square brackets of IPv6 addresses + OPENSSL_CONF="" # see https://github.com/drwetter/testssl.sh/issues/134 + if [[ "$NODE" == *.local ]]; then + if "$HAS_AVAHIRESOLVE"; then + rDNS=$(avahi-resolve -a $nodeip 2>/dev/null | awk '{ print $2 }') + elif "$HAS_DIG"; then + rDNS=$(dig -x $nodeip @224.0.0.251 -p 5353 +notcp +noall +answer +short | awk '{ print $1 }') + fi + elif "$HAS_DIG"; then + # 1+2 should suffice. It's a compromise for if e.g. network is down but we have a docker/localhost server + rDNS=$(dig -x $nodeip +timeout=1 +tries=2 +noall +answer +short | awk '{ print $1 }') # +short returns also CNAME, e.g. openssl.org + elif "$HAS_HOST"; then + rDNS=$(host -t PTR $nodeip 2>/dev/null | awk '/pointer/ { print $NF }') + elif "$HAS_DRILL"; then + rDNS=$(drill -x ptr $nodeip 2>/dev/null | awk '/ANSWER SECTION/ { getline; print $NF }') + elif "$HAS_NSLOOKUP"; then + rDNS=$(strip_lf "$(nslookup -type=PTR $nodeip 2>/dev/null | grep -v 'canonical name =' | grep 'name = ' | awk '{ print $NF }' | sed 's/\.$//')") + fi + OPENSSL_CONF="$saved_openssl_conf" # see https://github.com/drwetter/testssl.sh/issues/134 + # First, rDNS can contain > 1 line due to multiple PTR DNS records, though this is not recommended. + # So we use a loop to check for each FQDN returned. There we remove chars which under weird + # circumstances (see #1506) can show up here. The blacklist is taken from RFC 1912 ("Allowable characters in a + # label for a host name are only ASCII, letters, digits, and the `-' character") + while read -r line; do + line="$(tr -dc '[a-zA-Z0-9-_.]' <<< "$line")" + [[ -z "$rdns" ]] && rdns="$line" || rdns="$rdns $line" + done <<< "$rDNS" + rDNS="$rdns" + [[ -z "$rDNS" ]] && rDNS="--" + return 0 +} + +# We need to get the IP address of the proxy so we can use it in fd_socket +# +check_proxy() { + if [[ -n "$PROXY" ]]; then + if ! 
"$HAS_PROXY"; then + fatal "Your $OPENSSL is too old to support the \"-proxy\" option" $ERR_OSSLBIN + fi + if [[ "$PROXY" == auto ]]; then + # Get $ENV https_proxy is the one we care about for connects + PROXY="${https_proxy#*\/\/}" + # Fallback: + [[ -z "$PROXY" ]] && PROXY="${http_proxy#*\/\/}" + [[ -z "$PROXY" ]] && fatal "you specified \"--proxy=auto\" but \"\$http(s)_proxy\" is empty" $ERR_CMDLINE + fi + # strip off http/https part if supplied: + PROXY="${PROXY/http\:\/\//}" + PROXY="${PROXY/https\:\/\//}" # this shouldn't be needed + PROXYNODE="${PROXY%:*}" + PROXYPORT="${PROXY#*:}" + is_number "$PROXYPORT" || fatal "Proxy port cannot be determined from \"$PROXY\"" $ERR_CMDLINE + + #if is_ipv4addr "$PROXYNODE" || is_ipv6addr "$PROXYNODE" ; then + # IPv6 via openssl -proxy: that doesn't work. Sockets does +#FIXME: finish this with LibreSSL which supports an IPv6 proxy + if is_ipv4addr "$PROXYNODE"; then + PROXYIP="$PROXYNODE" + else + PROXYIP="$(get_a_record "$PROXYNODE" 2>/dev/null | grep -v alias | sed 's/^.*address //')" + [[ -z "$PROXYIP" ]] && fatal "Proxy IP cannot be determined from \"$PROXYNODE\"" $ERR_CMDLINE + fi + PROXY="-proxy $PROXYIP:$PROXYPORT" + fi +} + + +# this is only being called from determine_optimal_proto in order to check whether we have a server +# with client authentication, a server with no SSL session ID switched off +# +sclient_auth() { + [[ $1 -eq 0 ]] && return 0 # no client auth (CLIENT_AUTH=false is preset globally) + if [[ -n $(awk '/Master-Key: / { print $2 }' "$2") ]]; then # connect succeeded + if grep -q '^<<< .*CertificateRequest' "$2"; then # CertificateRequest message in -msg + CLIENT_AUTH=true + return 0 + fi + if [[ -z $(awk '/Session-ID: / { print $2 }' "$2") ]]; then # probably no SSL session + if [[ 2 -eq $(grep -c CERTIFICATE "$2") ]]; then # do another sanity check to be sure + CLIENT_AUTH=false + NO_SSL_SESSIONID=true # NO_SSL_SESSIONID is preset globally to false for all other cases + return 0 + fi + fi + fi + # what's left now is: master key empty, handshake returned not successful, session ID empty --> not successful + return 1 +} + +# Determine the best parameters to use with tls_sockets(): +# For TLSv1.3, determine what extension number to use for the key_share extension. +# For TLSv1.2, determine what cipher list to send, since there are more than 128 +# TLSv1.2 ciphers and some servers fail if the ClientHello contains too many ciphers. +# If both TLSv1.3 and TLSv1.2 ClientHello messages result in failed connection attempts, +# then try to determine whether: +# (1) This is an SSLv2-only server +# (2) This server supports some protocol in SSLv3 - TLSv1.1, but cannot handle version negotiation. +# (3) This is not a TLS/SSL enabled server. +# This information can be used by determine_optimal_proto() to help distinguish between a server +# that is not TLS/SSL enabled and one that is not compatible with the version of OpenSSL being used. +determine_optimal_sockets_params() { + local -i ret1=1 ret2=1 + local i proto cipher_offered + local all_failed=true + + # If a STARTTLS protocol is specified and $SSL_NATIVE is true, then skip this test, since + # $SSL_NATIVE may have been set to true as a result of tls_sockets() not supporting the STARTTLS + # protocol. + [[ -n "$STARTTLS_PROTOCOL" ]] && "$SSL_NATIVE" && return 0 + + # NOTE: The following code is only needed as long as draft versions of TLSv1.3 prior to draft 23 + # are supported. It is used to determine whether a draft 23 or pre-draft 23 ClientHello should be + # sent. 
+ KEY_SHARE_EXTN_NR="33" + tls_sockets "04" "$TLS13_CIPHER" "" "00, 2b, 00, 0f, 0e, 03,04, 7f,1c, 7f,1b, 7f,1a, 7f,19, 7f,18, 7f,17" + if [[ $? -eq 0 ]]; then + add_tls_offered tls1_3 yes + all_failed=false + else + KEY_SHARE_EXTN_NR="28" + tls_sockets "04" "$TLS13_CIPHER" "" "00, 2b, 00, 0b, 0a, 7f,16, 7f,15, 7f,14, 7f,13, 7f,12" + if [[ $? -eq 0 ]]; then + add_tls_offered tls1_3 yes + all_failed=false + else + add_tls_offered tls1_3 no + KEY_SHARE_EXTN_NR="33" + fi + fi + if ! "$all_failed"; then + # Determine which version of TLS 1.3 was offered. For drafts 18-21 the + # version appears in the ProtocolVersion field of the ServerHello. For + # drafts 22-28 and the final TLS 1.3 the ProtocolVersion field contains + # 0303 and the actual version appears in the supported_versions extension. + if [[ "${TLS_SERVER_HELLO:8:3}" == 7F1 ]]; then + add_tls_offered tls1_3_draft$(hex2dec "${TLS_SERVER_HELLO:10:2}") yes + elif [[ "$TLS_SERVER_HELLO" =~ 002B00020304 ]]; then + add_tls_offered tls1_3_rfc8446 yes + elif [[ "$TLS_SERVER_HELLO" =~ 002B00027F1[2-9A-C] ]]; then + add_tls_offered tls1_3_draft$(hex2dec "${BASH_REMATCH:10:2}") yes + fi + fi + + # Need to determine which set of ciphers is best to use with + # a TLSv1.2 ClientHello since there are far more than 128 ciphers + # that can be used. + tls_sockets "03" "$TLS12_CIPHER" + ret1=$? + if [[ $ret1 -eq 0 ]] || [[ $ret1 -eq 2 ]]; then + case $DETECTED_TLS_VERSION in + 0303) add_tls_offered tls1_2 yes ;; + 0302) add_tls_offered tls1_1 yes ;; + 0301) add_tls_offered tls1 yes ;; + 0300) add_tls_offered ssl3 yes ;; + esac + all_failed=false + fi + + # Try again with a different, less common, set of cipher suites + # see #807 and #806. If using these cipher suites results in a + # successful connection, then change $TLS12_CIPHER to these + # cipher suites so that later tests will use this list of cipher + # suites. + if [[ $ret1 -ne 0 ]]; then + tls_sockets "03" "$TLS12_CIPHER_2ND_TRY" + ret2=$? + if [[ $ret2 -eq 0 ]]; then + add_tls_offered tls1_2 yes + TLS12_CIPHER="$TLS12_CIPHER_2ND_TRY" + all_failed=false + else + add_tls_offered tls1_2 no + fi + if [[ $ret2 -eq 2 ]]; then + case $DETECTED_TLS_VERSION in + 0302) add_tls_offered tls1_1 yes ;; + 0301) add_tls_offered tls1 yes ;; + 0300) add_tls_offered ssl3 yes ;; + esac + [[ $ret1 -ne 2 ]] && TLS12_CIPHER="$TLS12_CIPHER_2ND_TRY" + all_failed=false + fi + fi + if [[ $ret1 -eq 0 ]] || [[ $ret2 -eq 0 ]]; then + cipher_offered="$(get_cipher "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt")" + if [[ "$cipher_offered" == TLS_* ]] || [[ "$cipher_offered" == SSL_* ]]; then + cipher_offered="$(rfc2hexcode "$cipher_offered")" + else + cipher_offered="$(openssl2hexcode "$cipher_offered")" + fi + [[ ${#cipher_offered} -eq 9 ]] && TLS12_CIPHER_OFFERED="${cipher_offered:2:2},${cipher_offered:7:2}" + fi + + if "$all_failed"; then + # One of the following must be true: + # * This is not a TLS/SSL enabled server. + # * The server only supports SSLv2 + # * The server does not handle version negotiation correctly. + for proto in 01 00 02; do + tls_sockets "$proto" "$TLS_CIPHER" "" "" "true" + ret1=$? 
+ if [[ $ret1 -ne 0 ]]; then + case $proto in + 02) add_tls_offered tls1_1 no ;; + 01) add_tls_offered tls1 no ;; + 00) add_tls_offered ssl3 no ;; + esac + fi + if [[ $ret1 -eq 0 ]] || [[ $ret1 -eq 2 ]]; then + case $DETECTED_TLS_VERSION in + 0302) add_tls_offered tls1_1 yes ;; + 0301) add_tls_offered tls1 yes ;; + 0300) add_tls_offered ssl3 yes ;; + esac + OPTIMAL_SOCKETS_PROTO="$proto" + all_failed=false + break + fi + done + fi + if "$all_failed"; then + sslv2_sockets + [[ $? -eq 3 ]] && all_failed=false && add_tls_offered ssl2 yes + fi + ALL_FAILED_SOCKETS="$all_failed" + return 0 +} + + +# This function determines (STARTTLS_)OPTIMAL_PROTO. It is basically a workaround function as under certain +# circumstances a ClientHello without specifying a protocol will fail. +# Circumstances observed so far: 1.) IIS 6 and openssl 1.0.2 as opposed to 1.0.1 2.) starttls + dovecot imap. +# Independent on the server side it seems reasonable to to know upfront which protocol always works +# +# arg1: if empty: no STARTTLS, else: STARTTLS protocol +# The first try in the loop is empty as we prefer not to specify always a protocol if we can get along w/o it +# +determine_optimal_proto() { + local all_failed=true + local tmp="" + local proto optimal_proto + + "$do_tls_sockets" && return 0 + + >$ERRFILE + if [[ -n "$1" ]]; then + # STARTTLS workaround needed see https://github.com/drwetter/testssl.sh/issues/188 -- kind of odd + for STARTTLS_OPTIMAL_PROTO in -tls1_2 -tls1 -ssl3 -tls1_1 -tls1_3 -ssl2; do + case $STARTTLS_OPTIMAL_PROTO in + -tls1_3) "$HAS_TLS13" || continue ;; + -ssl3) "$HAS_SSL3" || continue ;; + -ssl2) "$HAS_SSL2" || continue ;; + *) ;; + esac + $OPENSSL s_client $(s_client_options "$STARTTLS_OPTIMAL_PROTO $BUGS -connect "$NODEIP:$PORT" $PROXY -msg $STARTTLS $SNI") $TMPFILE 2>>$ERRFILE + if sclient_auth $? $TMPFILE; then + all_failed=false + add_tls_offered "${STARTTLS_OPTIMAL_PROTO/-/}" yes + break + fi + done + "$all_failed" && STARTTLS_OPTIMAL_PROTO="" + optimal_proto="$STARTTLS_OPTIMAL_PROTO" + debugme echo "STARTTLS_OPTIMAL_PROTO: $STARTTLS_OPTIMAL_PROTO" + else + # No STARTTLS + for proto in '' -tls1_2 -tls1 -tls1_3 -ssl3 -tls1_1 -ssl2; do + case $proto in + -tls1_3) "$HAS_TLS13" || continue ;; + -ssl3) "$HAS_SSL3" || continue ;; + -ssl2) "$HAS_SSL2" || continue ;; + *) ;; + esac + $OPENSSL s_client $(s_client_options "$proto $BUGS -connect "$NODEIP:$PORT" -msg $PROXY $SNI") $TMPFILE 2>>$ERRFILE + if sclient_auth $? $TMPFILE; then + # we use the successful handshake at least to get one valid protocol supported -- it saves us time later + if [[ -z "$proto" ]]; then + # convert to openssl terminology + tmp=$(get_protocol $TMPFILE) + tmp=${tmp/\./_} + tmp=${tmp/v/} + tmp="$(tolower $tmp)" + add_tls_offered "${tmp}" yes + debugme echo "one proto determined: $tmp" + OPTIMAL_PROTO="" + else + add_tls_offered "${proto/-/}" yes + OPTIMAL_PROTO="$proto" + fi + all_failed=false + break + fi + done + "$all_failed" && OPTIMAL_PROTO="" + optimal_proto="$OPTIMAL_PROTO" + + debugme echo "OPTIMAL_PROTO: $OPTIMAL_PROTO" + fi + [[ "$optimal_proto" != -ssl2 ]] && ! 
"$all_failed" && grep -q '^Server Temp Key' $TMPFILE && HAS_DH_BITS=true # FIX #190 + if [[ "$(has_server_protocol "tls1_3")" -eq 0 ]] && [[ "$(has_server_protocol "tls1_2")" -ne 0 ]] && + [[ "$(has_server_protocol "tls1_1")" -ne 0 ]] && [[ "$(has_server_protocol "tls1")" -ne 0 ]] && + [[ "$(has_server_protocol "ssl3")" -ne 0 ]]; then + TLS13_ONLY=true + fi + + if [[ "$optimal_proto" == -ssl2 ]]; then + prln_magenta "$NODEIP:$PORT appears to only support SSLv2." + ignore_no_or_lame " Type \"yes\" to proceed and accept false negatives or positives" "yes" + [[ $? -ne 0 ]] && exit $ERR_CLUELESS + elif "$all_failed" && ! "$ALL_FAILED_SOCKETS"; then + if ! "$HAS_TLS13" && "$TLS13_ONLY"; then + pr_magenta " $NODE:$PORT appears to support TLS 1.3 ONLY. You better use --openssl=" + if ! "$OSSL_SHORTCUT" || [[ ! -x /usr/bin/openssl ]] || /usr/bin/openssl s_client -tls1_3 -connect invalid. 2>&1 | grep -aiq "unknown option"; then + outln + ignore_no_or_lame " Type \"yes\" to proceed and accept all scan problems" "yes" + [[ $? -ne 0 ]] && exit $ERR_CLUELESS + MAX_OSSL_FAIL=10 + else + # dirty hack but an idea for the future to be implemented upfront: Now we know, we'll better off + # with the OS supplied openssl binary. We need to inittialize variables / arrays again though. + # And the service detection can't be made up for now + outln ", \n proceeding with /usr/bin/openssl" + OPENSSL=/usr/bin/openssl + find_openssl_binary + prepare_arrays + fi + elif ! "$HAS_SSL3" && [[ "$(has_server_protocol "ssl3")" -eq 0 ]] && [[ "$(has_server_protocol "tls1_3")" -ne 0 ]] && \ + [[ "$(has_server_protocol "tls1_2")" -ne 0 ]] && [[ "$(has_server_protocol "tls1_1")" -ne 0 ]] && + [[ "$(has_server_protocol "tls1")" -ne 0 ]]; then + prln_magenta " $NODE:$PORT appears to support SSLv3 ONLY. You better use --openssl=" + ignore_no_or_lame " Type \"yes\" to proceed and accept all scan problems" "yes" + [[ $? -ne 0 ]] && exit $ERR_CLUELESS + MAX_OSSL_FAIL=10 + else + prln_bold " Your OpenSSL cannot connect to $NODEIP:$PORT" + ignore_no_or_lame " The results might look ok but they could be nonsense. Really proceed ? (\"yes\" to continue)" "yes" + [[ $? -ne 0 ]] && exit $ERR_CLUELESS + fi + elif "$all_failed"; then + outln + if "$HAS_IPv6"; then + pr_bold " Your $OPENSSL is not IPv6 aware, or $NODEIP:$PORT " + else + pr_bold " $NODEIP:$PORT " + fi + tmpfile_handle ${FUNCNAME[0]}.txt + prln_bold "doesn't seem to be a TLS/SSL enabled server"; + ignore_no_or_lame " The results might look ok but they could be nonsense. Really proceed ? (\"yes\" to continue)" "yes" + [[ $? -ne 0 ]] && exit $ERR_CLUELESS + elif ! "$all_failed" && "$ALL_FAILED_SOCKETS" && ! "$SSL_NATIVE"; then + # For some reason connecting with tls_sockets/sslv2_sockets didn't work, but connecting + # with $OPENSSL s_client did. + # FIXME: Should we include some sort of "please report" note here? + prln_magenta " Testing with $NODE:$PORT only worked using $OPENSSL." + prln_magenta " Test results may be somewhat better if the --ssl-native option is used." + ignore_no_or_lame " Type \"yes\" to proceed and accept false negatives or positives" "yes" + [[ $? -ne 0 ]] && exit $ERR_CLUELESS + fi + + tmpfile_handle ${FUNCNAME[0]}.txt + return 0 +} + + +# arg1 (optional): ftp smtp, lmtp, pop3, imap, xmpp, telnet, ldap, postgres, mysql, irc, nntp (maybe with trailing s) +# +determine_service() { + local ua + local protocol + local basicauth_header="" + + # Check if we can connect to $NODEIP:$PORT. Attention: This ALWAYS uses sockets. 
Thus timeouts for --ssl-=native do not apply + if ! fd_socket 5; then + if [[ -n "$PROXY" ]]; then + fatal "You're sure $PROXYNODE:$PROXYPORT allows tunneling here? Can't connect to \"$NODEIP:$PORT\"" $ERR_CONNECT + else + if "$MULTIPLE_CHECKS"; then + ip_fatal "Couldn't connect to $NODEIP:$PORT" + return 1 + else + fatal "Can't connect to \"$NODEIP:$PORT\"\nMake sure a firewall is not between you and your scanning target!" $ERR_CONNECT + fi + fi + fi + close_socket + + outln + if [[ -z "$1" ]]; then + # no STARTTLS. + determine_optimal_sockets_params + determine_optimal_proto + $SNEAKY && \ + ua="$UA_SNEAKY" || \ + ua="$UA_STD" + if [[ -n "$BASICAUTH" ]]; then + basicauth_header="Authorization: Basic $(safe_echo "$BASICAUTH" | $OPENSSL base64 2>/dev/null)\r\n" + fi + GET_REQ11="GET $URL_PATH HTTP/1.1\r\nHost: $NODE\r\nUser-Agent: $ua\r\n${basicauth_header}Accept-Encoding: identity\r\nAccept: text/*\r\nConnection: Close\r\n\r\n" + # returns always 0: + service_detection $OPTIMAL_PROTO + else # STARTTLS + if [[ "$1" == postgres ]]; then + protocol="postgres" + else + protocol=${1%s} # strip trailing 's' in ftp(s), smtp(s), pop3(s), etc + fi + + case "$protocol" in + ftp|smtp|lmtp|pop3|imap|xmpp|telnet|ldap|postgres|mysql|nntp) + STARTTLS="-starttls $protocol" + if [[ "$protocol" == xmpp ]]; then + # for XMPP, openssl has a problem using -connect $NODEIP:$PORT. thus we use -connect $NODE:$PORT instead! + NODEIP="$NODE" + if [[ -n "$XMPP_HOST" ]]; then + if ! "$HAS_XMPP"; then + fatal "Your $OPENSSL does not support the \"-xmpphost\" option" $ERR_OSSLBIN + fi + STARTTLS="$STARTTLS -xmpphost $XMPP_HOST" # small hack -- instead of changing calls all over the place + # see https://xmpp.org/rfcs/rfc3920.html + else + if is_ipv4addr "$NODE"; then + # XMPP needs a jabber domainname + if [[ -n "$rDNS" ]]; then + prln_warning " IP address doesn't work for XMPP, trying PTR record $rDNS" + # remove trailing . + NODE=${rDNS%%.} + NODEIP=${rDNS%%.} + else + fatal "No DNS supplied and no PTR record available which I can try for XMPP" $ERR_DNSLOOKUP + fi + fi + fi + elif [[ "$protocol" == postgres ]]; then + # Check if openssl version supports postgres. + if ! "$HAS_POSTGRES"; then + fatal "Your $OPENSSL does not support the \"-starttls postgres\" option" $ERR_OSSLBIN + fi + elif [[ "$protocol" == mysql ]]; then + # Check if openssl version supports mysql. + if ! "$HAS_MYSQL"; then + fatal "Your $OPENSSL does not support the \"-starttls mysql\" option" $ERR_OSSLBIN + fi + elif [[ "$protocol" == lmtp ]]; then + # Check if openssl version supports lmtp. + if ! "$HAS_LMTP"; then + fatal "Your $OPENSSL does not support the \"-starttls lmtp\" option" $ERR_OSSLBIN + fi + elif [[ "$protocol" == nntp ]]; then + # Check if openssl version supports lmtp. + if ! 
"$HAS_NNTP"; then + fatal "Your $OPENSSL does not support the \"-starttls nntp\" option" $ERR_OSSLBIN + fi + fi + determine_optimal_sockets_params + determine_optimal_proto "$1" + + out " Service set:$CORRECT_SPACES STARTTLS via " + out "$(toupper "$protocol")" + [[ "$protocol" == mysql ]] && out " (experimental)" + fileout "service" "INFO" "$protocol" + [[ -n "$XMPP_HOST" ]] && out " (XMPP domain=\'$XMPP_HOST\')" + outln + ;; + *) outln + fatal "momentarily only ftp, smtp, lmtp, pop3, imap, xmpp, telnet, ldap, nntp, postgres and mysql allowed" $ERR_CMDLINE + ;; + esac + fi + tmpfile_handle ${FUNCNAME[0]}.txt + return 0 # OPTIMAL_PROTO, GET_REQ*/HEAD_REQ* is set now +} + + +# Sets SERVER_SIZE_LIMIT_BUG to true or false, depending on whether we hit the 128 cipher limit. +# Return value is 0 unless we have a problem executing +# +determine_sizelimitbug() { + # overflow_cipher must be some cipher that does not appear in TLS12_CIPHER. + local overflow_cipher='C0,86' + local -i nr_ciphers + + # For STARTTLS protcols not being implemented yet via sockets this is a bypass otherwise it won't be usable at all (e.g. LDAP) + # Fixme: find out whether we can't skip this in general for STARTTLS + [[ "$STARTTLS" =~ ldap ]] && return 0 + [[ "$STARTTLS" =~ irc ]] && return 0 + + # Only with TLS 1.2 offered at the server side it is possible to hit this bug, in practise. Thus + # we assume if TLS 1.2 is not supported, the server has no cipher size limit bug. It still may, + # theoretically, but in a regular check with testssl.sh we won't hit this limit with lower protocols. + # Upon calling this function we already know whether TLS 1.2 is supported. If TLS 1.2 is supported, we + # send 129 ciphers (including 00FF) and check whether it works. + + if [[ 1 -eq $(has_server_protocol 03) ]]; then + SERVER_SIZE_LIMIT_BUG=false + else + if [[ "$DEBUG" -ge 1 ]]; then + nr_ciphers="$(tr ' ' '\n' <<< "${overflow_cipher}, $TLS12_CIPHER" | sed -e '/^$/d' | wc -l)" + if [[ $nr_ciphers -ne 129 ]]; then + prln_warning "FIXME line $LINENO, ${FUNCNAME[0]} sending $nr_ciphers ciphers rather than 129." + else + debugme echo "${FUNCNAME[0]} sending $nr_ciphers ciphers" + fi + fi + tls_sockets 03 "${overflow_cipher}, ${TLS12_CIPHER}" + if [[ $? 
-eq 0 ]]; then + SERVER_SIZE_LIMIT_BUG=false + else + SERVER_SIZE_LIMIT_BUG=true + fi + debugme echo -e "\nSERVER_SIZE_LIMIT_BUG: $SERVER_SIZE_LIMIT_BUG" + fi + if "$SERVER_SIZE_LIMIT_BUG"; then + out " Pre-test: " + prln_svrty_medium "128 cipher limit bug" + fileout "pre_128cipher" "MEDIUM" "128 cipher limit bug" + else + [[ "$DEBUG" -ge 1 ]] && outln " Pre-test: No 128 cipher limit bug" + fileout "pre_128cipher" "INFO" "No 128 cipher limit bug" + fi + return 0 +} + + +display_rdns_etc() { + local ip further_ip_addrs="" + local nodeip="$(tr -d '[]' <<< $NODEIP)" # for displaying IPv6 addresses we don't need [] + + if [[ -n "$PROXY" ]]; then + out " Via Proxy: $CORRECT_SPACES" + outln "$PROXYIP:$PROXYPORT " + fi + if [[ $(count_words "$IP46ADDRs") -gt 1 ]]; then + out " Further IP addresses: $CORRECT_SPACES" + for ip in $IP46ADDRs; do + if [[ "$ip" == "$NODEIP" ]] || [[ "[$ip]" == "$NODEIP" ]]; then + continue + else + further_ip_addrs+="$ip " + fi + done + outln "$(out_row_aligned_max_width "$further_ip_addrs" " $CORRECT_SPACES" $TERM_WIDTH)" + fi + if "$LOCAL_A"; then + outln " A record via: $CORRECT_SPACES /etc/hosts " + elif "$LOCAL_AAAA"; then + outln " AAAA record via: $CORRECT_SPACES /etc/hosts " + elif [[ -n "$CMDLINE_IP" ]]; then + if is_ipv6addr $"$CMDLINE_IP"; then + outln " AAAA record via: $CORRECT_SPACES supplied IP \"$CMDLINE_IP\"" + else + outln " A record via: $CORRECT_SPACES supplied IP \"$CMDLINE_IP\"" + fi + fi + if [[ "$rDNS" =~ instructed ]]; then + out "$(printf " %-23s " "rDNS ($nodeip):")" + out "$rDNS" + elif [[ -n "$rDNS" ]]; then + out "$(printf " %-23s " "rDNS ($nodeip):")" + out "$(out_row_aligned_max_width "$rDNS" " $CORRECT_SPACES" $TERM_WIDTH)" + fi +} + +datebanner() { + local scan_time_f="" + + if [[ "$1" =~ Done ]] ; then + scan_time_f="$(printf "%04ss" "$SCAN_TIME")" # 4 digits because of windows + pr_reverse "$1 $(date +%F) $(date +%T) [$scan_time_f] -->> $NODEIP:$PORT ($NODE) <<--" + else + pr_reverse "$1 $(date +%F) $(date +%T) -->> $NODEIP:$PORT ($NODE) <<--" + fi + outln "\n" + [[ "$1" =~ Start ]] && display_rdns_etc +} + +# one line with char $1 over screen width $2 +draw_line() { + out "$(printf -- "$1"'%.s' $(eval "echo {1.."$(($2))"}"))" +} + + +run_mx_all_ips() { + local mxs mx + local mxport + local -i ret=0 + local word="" + + STARTTLS_PROTOCOL="smtp" + # test first higher priority servers + mxs=$(get_mx_record "$1" | sort -n | sed -e 's/^.* //' -e 's/\.$//' | tr '\n' ' ') + if [[ $CMDLINE_IP == one ]]; then + word="as instructed one" # with highest priority + mxs=${mxs%% *} + else + word="the only" + fi + mxport=${2:-25} + if [[ -n "$LOGFILE" ]]; then + prepare_logging + else + prepare_logging "${FNAME_PREFIX}mx-$1" + fi + if [[ -n "$mxs" ]] && [[ "$mxs" != ' ' ]]; then + [[ $(count_words "$mxs") -gt 1 ]] && MULTIPLE_CHECKS=true + if "$MULTIPLE_CHECKS"; then + pr_bold "Testing all MX records (on port $mxport): " + else + pr_bold "Testing $word MX record (on port $mxport): " + fi + outln "$mxs" + [[ $mxport == 465 ]] && STARTTLS_PROTOCOL="" # no starttls for tcp 465, all other ports are starttls + for mx in $mxs; do + draw_line "-" $((TERM_WIDTH * 2 / 3)) + outln + parse_hn_port "$mx:$mxport" + determine_ip_addresses || continue + if [[ $(count_words "$IPADDRs") -gt 1 ]]; then # we have more than one ipv4 address to check + MULTIPLE_CHECKS=true + pr_bold "Testing all IPv4 addresses (port $PORT): "; outln "$IPADDRs" + for ip in $IPADDRs; do + NODEIP="$ip" + lets_roll "${STARTTLS_PROTOCOL}" + done + else + NODEIP="$IPADDRs" + lets_roll 
"${STARTTLS_PROTOCOL}" + fi + ret=$(($? + ret)) + done + draw_line "-" $((TERM_WIDTH * 2 / 3)) + outln + pr_bold "Done testing all MX records (on port $mxport): "; outln "$mxs" + else + prln_bold " $1 has no MX records(s)" + fi + return $ret +} + +# If run_mass_testing() is being used, then create the command line +# for the test based on the global command line (all elements of the +# command line provided to the parent, except the --file/-iL option) and the +# specific command line options for the test to be run. Each argument +# in the command line needs to be a separate element in an array in order +# to deal with word splitting within file names (see #702). +# +# If run_mass_testing_parallel() is being used, then in addition to the above, +# modify global command line for child tests so that if all (JSON, CSV, HTML) +# output is to go into a single file, each child will have its output placed in +# a separate, named file, so that the separate files can be concatenated +# together once they are complete to create the single file. +# +# If run_mass_testing() is being used, then "$1" is "serial". If +# run_mass_testing_parallel() is being used, then "$1" is "parallel XXXXXXXX" +# where XXXXXXXX is the number of the test being run. +# +create_mass_testing_cmdline() { + local testing_type="$1" + local cmd test_number + local outfile_arg + local -i nr_cmds=0 index=0 + local skip_next=false + + MASS_TESTING_CMDLINE=() + [[ "$testing_type" =~ parallel ]] && read -r testing_type test_number <<< "$testing_type" + + # Start by adding the elements from the global command line to the command line for the + # test. If run_mass_testing_parallel(), then modify the command line so that, when + # required, each child process sends its test results to a separate file. If a cmd + # uses '=' for supplying a value we just skip next parameter (we don't use 'parse_opt_equal_sign' here) + debugme echo "${CMDLINE_ARRAY[@]}" + for cmd in "${CMDLINE_ARRAY[@]}"; do + "$skip_next" && skip_next=false && index+=1 && continue + if [[ "$cmd" =~ --file ]] || [[ "$cmd" =~ -iL ]]; then + # Don't include the "--file[=...] or -iL argument in the child's command + # line, but do include "--warnings=batch". 
+ MASS_TESTING_CMDLINE[nr_cmds]="--warnings=batch" + nr_cmds+=1 + # next is the file itself, as no '=' was supplied + [[ "$cmd" == --file ]] && skip_next=true + [[ "$cmd" == -iL ]] && skip_next=true + elif [[ "$testing_type" == serial ]]; then + if "$JSONHEADER" && ( [[ "$cmd" =~ --jsonfile-pretty ]] || [[ "$cmd" =~ -oJ ]] ); then + >"$TEMPDIR/jsonfile_child.json" + MASS_TESTING_CMDLINE[nr_cmds]="--jsonfile-pretty=$TEMPDIR/jsonfile_child.json" + # next is the jsonfile itself, as no '=' was supplied + [[ "$cmd" == --jsonfile-pretty ]] && skip_next=true + [[ "$cmd" == -oJ ]] && skip_next=true + elif "$JSONHEADER" && ( [[ "$cmd" =~ --jsonfile ]] || [[ "$cmd" =~ -oj ]] ); then + >"$TEMPDIR/jsonfile_child.json" + MASS_TESTING_CMDLINE[nr_cmds]="--jsonfile=$TEMPDIR/jsonfile_child.json" + # next is the jsonfile itself, as no '=' was supplied + [[ "$cmd" == --jsonfile ]] && skip_next=true + [[ "$cmd" == -oj ]] && skip_next=true + elif "$JSONHEADER" && ( [[ "$cmd" =~ --outFile ]] || [[ "$cmd" =~ -oA ]] ); then + outfile_arg="$(parse_opt_equal_sign "$cmd" "${CMDLINE_ARRAY[index+1]}")" + >"$TEMPDIR/jsonfile_child.json" + MASS_TESTING_CMDLINE[nr_cmds]="-oJ=$TEMPDIR/jsonfile_child.json" + nr_cmds+=1 + MASS_TESTING_CMDLINE[nr_cmds]="-oC=$outfile_arg.csv" + nr_cmds+=1 + MASS_TESTING_CMDLINE[nr_cmds]="-oH=$outfile_arg.html" + # next is the filename itself, as no '=' was supplied + [[ "$cmd" == --outFile ]] && skip_next=true + [[ "$cmd" == -oA ]] && skip_next=true + elif "$JSONHEADER" && ( [[ "$cmd" =~ --outfile ]] || [[ "$cmd" =~ -oa ]] ); then + outfile_arg="$(parse_opt_equal_sign "$cmd" "${CMDLINE_ARRAY[index+1]}")" + >"$TEMPDIR/jsonfile_child.json" + MASS_TESTING_CMDLINE[nr_cmds]="-oj=$TEMPDIR/jsonfile_child.json" + nr_cmds+=1 + MASS_TESTING_CMDLINE[nr_cmds]="-oC=$outfile_arg.csv" + nr_cmds+=1 + MASS_TESTING_CMDLINE[nr_cmds]="-oH=$outfile_arg.html" + # next is the filename itself, as no '=' was supplied + [[ "$cmd" == --outfile ]] && skip_next=true + [[ "$cmd" == -oa ]] && skip_next=true + else + MASS_TESTING_CMDLINE[nr_cmds]="$cmd" + fi + nr_cmds+=1 + else + case "$cmd" in + --jsonfile|--jsonfile=*|-oj|-oj=*) + # If is a file, then have provide a different + # file name to each child process. If is a + # directory, then just pass it on to the child processes. 
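+                         # In other words (as the comment above implies): when a single JSON file was requested, each
+                         # child writes to its own $TEMPDIR/jsonfile_<test_number>.json, which the parent concatenates
+                         # once the child finishes; a directory argument is handed through unchanged.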
+ if "$JSONHEADER"; then + MASS_TESTING_CMDLINE[nr_cmds]="--jsonfile=$TEMPDIR/jsonfile_${test_number}.json" + # next is the jsonfile itself, as no '=' was supplied + [[ "$cmd" == --jsonfile ]] && skip_next=true + [[ "$cmd" == -oj ]] && skip_next=true + else + MASS_TESTING_CMDLINE[nr_cmds]="$cmd" + fi + ;; + --jsonfile-pretty|--jsonfile-pretty=*|-oJ|-oJ=*) + if "$JSONHEADER"; then + MASS_TESTING_CMDLINE[nr_cmds]="--jsonfile-pretty=$TEMPDIR/jsonfile_${test_number}.json" + [[ "$cmd" == --jsonfile-pretty ]] && skip_next=true + [[ "$cmd" == -oJ ]] && skip_next=true + else + MASS_TESTING_CMDLINE[nr_cmds]="$cmd" + fi + ;; + --csvfile|--csvfile=*|-oC|-oC=*) + if "$CSVHEADER"; then + MASS_TESTING_CMDLINE[nr_cmds]="--csvfile=$TEMPDIR/csvfile_${test_number}.csv" + [[ "$cmd" == --csvfile ]] && skip_next=true + [[ "$cmd" == -oC ]] && skip_next=true + else + MASS_TESTING_CMDLINE[nr_cmds]="$cmd" + fi + ;; + --htmlfile|--htmlfile=*|-oH|-oH=*) + if "$HTMLHEADER"; then + MASS_TESTING_CMDLINE[nr_cmds]="--htmlfile=$TEMPDIR/htmlfile_${test_number}.html" + [[ "$cmd" == --htmlfile ]] && skip_next=true + [[ "$cmd" == -oH ]] && skip_next=true + else + MASS_TESTING_CMDLINE[nr_cmds]="$cmd" + fi + ;; + --outfile|--outfile=*|-oa|-oa=*) + if "$JSONHEADER"; then + MASS_TESTING_CMDLINE[nr_cmds]="-oj=$TEMPDIR/jsonfile_${test_number}.json" + nr_cmds+=1 + MASS_TESTING_CMDLINE[nr_cmds]="-oC=$TEMPDIR/csvfile_${test_number}.csv" + nr_cmds+=1 + MASS_TESTING_CMDLINE[nr_cmds]="-oH=$TEMPDIR/htmlfile_${test_number}.html" + # next is the filename itself, as no '=' was supplied + [[ "$cmd" == --outfile ]] && skip_next=true + [[ "$cmd" == -oa ]] && skip_next=true + else + MASS_TESTING_CMDLINE[nr_cmds]="$cmd" + fi + ;; + --outFile|--outFile=*|-oA|-oA=*) + if "$JSONHEADER"; then + MASS_TESTING_CMDLINE[nr_cmds]="-oJ=$TEMPDIR/jsonfile_${test_number}.json" + nr_cmds+=1 + MASS_TESTING_CMDLINE[nr_cmds]="-oC=$TEMPDIR/csvfile_${test_number}.csv" + nr_cmds+=1 + MASS_TESTING_CMDLINE[nr_cmds]="-oH=$TEMPDIR/htmlfile_${test_number}.html" + # next is the filename itself, as no '=' was supplied + [[ "$cmd" == --outFile ]] && skip_next=true + [[ "$cmd" == -oA ]] && skip_next=true + else + MASS_TESTING_CMDLINE[nr_cmds]="$cmd" + fi + ;; + *) + MASS_TESTING_CMDLINE[nr_cmds]="$cmd" + ;; + esac + nr_cmds+=1 + fi + index+=1 + done + + # Now add the command line arguments for the specific test to the command line. + # Skip the first argument sent to this function, since it specifies the type of testing being performed. 
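+     # The remaining arguments are the words of the current file line, e.g. (hypothetically)
+     # '--starttls smtp mail.example.com:587'; they are appended verbatim after the inherited global options.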
+ shift + while [[ $# -gt 0 ]]; do + MASS_TESTING_CMDLINE[nr_cmds]="$1" + nr_cmds+=1 + shift + done + + return 0 +} + + +ports2starttls() { + local tcp_port=$1 + local ret=0 + + # https://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers + case $tcp_port in + 21) echo "-t ftp " ;; + 23) echo "-t telnet " ;; + 119|433) echo "-t nntp " ;; # to come + 25|587) echo "-t smtp " ;; + 110) echo "-t pop3 " ;; + 143) echo "-t imap " ;; + 389) echo "-t ldap ";; + 3306) echo "-t mysql " ;; + 5222) echo "-t xmpp " ;; # domain of jabber server maybe needed + 5432) echo "-t postgres " ;; + 563) ;; # NNTPS + 636) ;; # LDAP + 1443|8443|443|981) ;; # HTTPS + 465) ;; # HTTPS | SMTP + 631) ;; # CUPS + 853) ;; # DNS over TLS + 995|993) ;; # POP3|IMAP + 3389) ;; # RDP + *) ret=1 ;; # we don't know this ports so we rather do not scan it + esac + return $ret +} + +nmap_to_plain_file() { + local target_fname="" + local oneline="" + local ip hostdontcare round_brackets ports_specs starttls + local tmp port host_spec protocol dontcare dontcare1 + #FIXME: IPv6 is missing here + + # Ok, since we are here we are sure to have an nmap file. To avoid questions we make sure it's the right format too + if [[ "$(head -1 "$FNAME")" =~ ( -oG )(.*) ]] || [[ "$(head -1 "$FNAME")" =~ ( -oA )(.*) ]] ; then + # yes, greppable + if [[ $(grep -c Status "$FNAME") -ge 1 ]]; then + [[ $(grep -c '\/open\/' "$FNAME") -eq 0 ]] && \ + fatal "Nmap file $FNAME should contain at least one open port" $ERR_FNAMEPARSE + else + fatal "strange, nmap grepable misses \"Status\"" -1 + fi + else + fatal "Nmap file $FNAME is not in grep(p)able format (-oG filename.g(n)map)" $ERR_FNAMEPARSE + fi + # strip extension and create output file *.txt in same folder + target_fname="${FNAME%.*}.txt" + > "${target_fname}" + if [[ $? -ne 0 ]]; then + # try to just create ${FNAME%.*}.txt in the same dir as the gnmap file failed. + # backup is using one in $TEMPDIR + target_fname="${target_fname##*\/}" # strip path (Unix) + target_fname="${target_fname##*\\}" # strip path (Dos) + target_fname="$TEMPDIR/$target_fname" + > "${target_fname}" || fatal "Cannot create \"${target_fname}\"" $ERR_FCREATE + fi + + # Line x: "Host: AAA.BBB.CCC.DDD () Status: Up" + # Line x+1: "Host: AAA.BBB.CCC.DDD () Ports: 443/open/tcp//https///" + # (or): Host: AAA.BBB.CCC.DDD () Ports: 22/open/tcp//ssh///, 25/open/tcp//smtp///, 443/open/tcp//ssl|http// + while read -r hostdontcare ip round_brackets tmp ports_specs; do + [[ "$ports_specs" =~ "Status: " ]] && continue # we don't need this + [[ "$ports_specs" =~ '/open/tcp/' ]] || continue # no open tcp at all for this IP --> move + host_spec="$ip" + fqdn="${round_brackets/\(/}" + fqdn="${fqdn/\)/}" + if [[ -n "$fqdn" ]]; then + tmp="$(get_a_record "$fqdn")" + debugme echo "$tmp \?= $ip" + if [[ "$tmp" == "$ip" ]]; then + host_spec="$fqdn" + fi + fi + while read -r oneline; do + # 25/open/tcp//smtp///, + [[ "$oneline" =~ '/open/tcp/' ]] || continue # no open tcp for this port on this IP --> move on + IFS=/ read -r port dontcare protocol dontcare1 <<< "$oneline" + starttls="$(ports2starttls $port)" + [[ $? 
-eq 1 ]] && continue # nmap got a port but we don't know how to speak to + [[ "$DEBUG" -ge 1 ]] && echo "${starttls}$host_spec:$port" + echo "${starttls}${host_spec}:${port}" >>"$target_fname" + done < <(tr ',' '\n' <<< "$ports_specs") + done < "$FNAME" + [[ "$DEBUG" -ge 1 ]] && echo + + [[ -s "$target_fname" ]] || \ + fatal "Couldn't find any open port in $FNAME" $ERR_FNAMEPARSE + export FNAME=$target_fname +} + +run_mass_testing() { + local cmdline="" + local first=true + local gnmapadd="" + local saved_fname="$FNAME" + + if [[ ! -r "$FNAME" ]] && "$IKNOW_FNAME"; then + fatal "Can't read file \"$FNAME\"" $ERR_FNAMEPARSE + fi + + if [[ "$(head -1 "$FNAME")" =~ (Nmap [4-8])(.*)( scan initiated )(.*) ]]; then + gnmapadd="grep(p)able nmap " + nmap_to_plain_file + fi + + pr_reverse "====== Running in file batch mode with ${gnmapadd}file=\"$saved_fname\" ======"; outln "\n" + while read -r cmdline; do + cmdline="$(filter_input "$cmdline")" + [[ -z "$cmdline" ]] && continue + [[ "$cmdline" == EOF ]] && break + # Create the command line for the child in the form of an array (see #702) + create_mass_testing_cmdline "serial" $cmdline + draw_line "=" $((TERM_WIDTH / 2)); outln; + outln "$(create_cmd_line_string "$0" "${MASS_TESTING_CMDLINE[@]}")" + # we call ourselves here. $do_mass_testing is the parent, $CHILD_MASS_TESTING... you figured + if [[ -z "$(type -p "$0")" ]]; then + CHILD_MASS_TESTING=true "$RUN_DIR/$PROG_NAME" "${MASS_TESTING_CMDLINE[@]}" + else + CHILD_MASS_TESTING=true "$0" "${MASS_TESTING_CMDLINE[@]}" + fi + if "$JSONHEADER" && [[ -s "$TEMPDIR/jsonfile_child.json" ]]; then + # Need to ensure that a separator is only added if the test + # produced some JSON output. + "$first" || fileout_separator # this is needed for appended output, see #687 + first=false + cat "$TEMPDIR/jsonfile_child.json" >> "$JSONFILE" + FIRST_FINDING=false + fi + done < "${FNAME}" + return $? +} + +# This function is called when it has been determined that the next child +# process has completed or it has been stopped. If the child process completed, +# then this process prints the child process's output to the terminal and, if +# appropriate, adds any JSON, CSV, and HTML output it has created to the +# appropriate file. If the child process was stopped, then a message indicating +# that is printed, but the incomplete results are not used. +# +get_next_message_testing_parallel_result() { + draw_line "=" $((TERM_WIDTH / 2)); outln; + outln "${PARALLEL_TESTING_CMDLINE[NEXT_PARALLEL_TEST_TO_FINISH]}" + if [[ "$1" == completed ]]; then + cat "$TEMPDIR/term_output_$(printf "%08d" $NEXT_PARALLEL_TEST_TO_FINISH).log" + if "$JSONHEADER" && [[ -s "$TEMPDIR/jsonfile_$(printf "%08d" $NEXT_PARALLEL_TEST_TO_FINISH).json" ]]; then + # Need to ensure that a separator is only added if the test + # produced some JSON output. 
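+          # FIRST_JSON_OUTPUT guards the separator so that concatenating the children's JSON fragments into
+          # $JSONFILE does not produce stray leading separators, cf. #687.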
+ "$FIRST_JSON_OUTPUT" || fileout_separator # this is needed for appended output, see #687 + FIRST_JSON_OUTPUT=false + FIRST_FINDING=false + cat "$TEMPDIR/jsonfile_$(printf "%08d" $NEXT_PARALLEL_TEST_TO_FINISH).json" >> "$JSONFILE" + fi + "$CSVHEADER" && cat "$TEMPDIR/csvfile_$(printf "%08d" $NEXT_PARALLEL_TEST_TO_FINISH).csv" >> "$CSVFILE" + "$HTMLHEADER" && cat "$TEMPDIR/htmlfile_$(printf "%08d" $NEXT_PARALLEL_TEST_TO_FINISH).html" >> "$HTMLFILE" + elif [[ "$1" == "stopped" ]]; then + outln "\nTest was stopped before it completed.\n" + else + outln "\nTest timed out before it completed.\n" + fi +} + +#FIXME: not called/tested yet +run_mass_testing_parallel() { + local cmdline="" + local -i i nr_active_tests=0 + local -a -i start_time=() + local -i curr_time wait_time + local gnmapadd="" + local saved_fname="$FNAME" + + if [[ ! -r "$FNAME" ]] && $IKNOW_FNAME; then + fatal "Can't read file \"$FNAME\"" $ERR_FNAMEPARSE + fi + + if [[ "$(head -1 "$FNAME")" =~ (Nmap [4-8])(.*)( scan initiated )(.*) ]]; then + gnmapadd="grep(p)able nmap " + nmap_to_plain_file + fi + + pr_reverse "====== Running in file batch mode with ${gnmapadd}file=\"$saved_fname\" ======"; outln "\n" + while read -r cmdline; do + cmdline="$(filter_input "$cmdline")" + [[ -z "$cmdline" ]] && continue + [[ "$cmdline" == "EOF" ]] && break + # Create the command line for the child in the form of an array (see #702) + create_mass_testing_cmdline "parallel $(printf "%08d" $NR_PARALLEL_TESTS)" $cmdline + + # fileout() won't include the "service" information in the JSON file for the child process + # if the JSON file doesn't already exist. + "$JSONHEADER" && >"$TEMPDIR/jsonfile_$(printf "%08d" $NR_PARALLEL_TESTS).json" + PARALLEL_TESTING_CMDLINE[NR_PARALLEL_TESTS]="$(create_cmd_line_string "$0" "${MASS_TESTING_CMDLINE[@]}")" + if [[ -z "$(type -p "$0")" ]]; then + CHILD_MASS_TESTING=true "$RUN_DIR/$PROG_NAME" "${MASS_TESTING_CMDLINE[@]}" > "$TEMPDIR/term_output_$(printf "%08d" $NR_PARALLEL_TESTS).log" 2>&1 & + else + CHILD_MASS_TESTING=true "$0" "${MASS_TESTING_CMDLINE[@]}" > "$TEMPDIR/term_output_$(printf "%08d" $NR_PARALLEL_TESTS).log" 2>&1 & + fi + PARALLEL_TESTING_PID[NR_PARALLEL_TESTS]=$! + start_time[NR_PARALLEL_TESTS]=$(date +%s) + if "$INTERACTIVE"; then + echo -en "\r \r" 1>&2 + echo -n "Started test #$NR_PARALLEL_TESTS" 1>&2 + [[ $NEXT_PARALLEL_TEST_TO_FINISH -lt $NR_PARALLEL_TESTS ]] && \ + echo -n " (waiting for test #$NEXT_PARALLEL_TEST_TO_FINISH to finish)" 1>&2 + fi + NR_PARALLEL_TESTS+=1 + nr_active_tests+=1 + sleep $PARALLEL_SLEEP + # Get the results of any completed tests + while [[ $NEXT_PARALLEL_TEST_TO_FINISH -lt $NR_PARALLEL_TESTS ]]; do + if [[ ${PARALLEL_TESTING_PID[NEXT_PARALLEL_TEST_TO_FINISH]} -eq 0 ]]; then + "$INTERACTIVE" && echo -en "\r \r" 1>&2 + get_next_message_testing_parallel_result "completed" + NEXT_PARALLEL_TEST_TO_FINISH+=1 + elif ! ps ${PARALLEL_TESTING_PID[NEXT_PARALLEL_TEST_TO_FINISH]} >/dev/null ; then + "$INTERACTIVE" && echo -en "\r \r" 1>&2 + get_next_message_testing_parallel_result "completed" + NEXT_PARALLEL_TEST_TO_FINISH+=1 + nr_active_tests=$nr_active_tests-1 + else + break + fi + done + if [[ $nr_active_tests -ge $MAX_PARALLEL ]]; then + curr_time=$(date +%s) + while true; do + # Check to see if any test completed + for (( i=NEXT_PARALLEL_TEST_TO_FINISH; i < NR_PARALLEL_TESTS; i++ )); do + if [[ ${PARALLEL_TESTING_PID[i]} -ne 0 ]] && \ + ! 
ps ${PARALLEL_TESTING_PID[i]} >/dev/null ; then + PARALLEL_TESTING_PID[i]=0 + nr_active_tests=$nr_active_tests-1 + break + fi + done + [[ $nr_active_tests -lt $MAX_PARALLEL ]] && break + if [[ $curr_time-${start_time[NEXT_PARALLEL_TEST_TO_FINISH]} -ge $MAX_WAIT_TEST ]]; then + # No test completed in the allocated time, so the first one to + # start will be killed. + kill ${PARALLEL_TESTING_PID[NEXT_PARALLEL_TEST_TO_FINISH]} >&2 2>/dev/null + wait ${PARALLEL_TESTING_PID[NEXT_PARALLEL_TEST_TO_FINISH]} 2>/dev/null # make sure pid terminated, see wait(1p) + "$INTERACTIVE" && echo -en "\r \r" 1>&2 + get_next_message_testing_parallel_result "timeout" + NEXT_PARALLEL_TEST_TO_FINISH+=1 + nr_active_tests=$nr_active_tests-1 + break + fi + # Wake up to increment the counter every second (so that the counter + # appears to users as if it is operating smoothly), but check the + # status of the $MAX_PARALLEL active processes less often, since the + # ps command is expensive. + for (( i=0; i <= $((MAX_PARALLEL/5)); i++ )); do + wait_time=$((curr_time-start_time[NEXT_PARALLEL_TEST_TO_FINISH])) + [[ $wait_time -gt $MAX_WAIT_TEST ]] && wait_time=$MAX_WAIT_TEST + if "$INTERACTIVE"; then + echo -en "\r \r" 1>&2 + echo -n "Waiting for test #$NEXT_PARALLEL_TEST_TO_FINISH to finish" 1>&2 + if [[ $((MAX_WAIT_TEST-wait_time)) -le 60 ]]; then + echo -n " ($((MAX_WAIT_TEST-wait_time)) seconds to timeout)" 1>&2 + else + echo -n " ($wait_time seconds)" 1>&2 + fi + fi + [[ $wait_time -ge $MAX_WAIT_TEST ]] && break + sleep 1 + curr_time=$(date +%s) + done + done + fi + done < "$FNAME" + + # Wait for remaining tests to finish + curr_time=$(date +%s) + while [[ $NEXT_PARALLEL_TEST_TO_FINISH -lt $NR_PARALLEL_TESTS ]]; do + if [[ ${PARALLEL_TESTING_PID[NEXT_PARALLEL_TEST_TO_FINISH]} -eq 0 ]] || \ + ! ps ${PARALLEL_TESTING_PID[NEXT_PARALLEL_TEST_TO_FINISH]} >/dev/null ; then + "$INTERACTIVE" && echo -en "\r \r" 1>&2 + get_next_message_testing_parallel_result "completed" + NEXT_PARALLEL_TEST_TO_FINISH+=1 + elif [[ $curr_time-${start_time[NEXT_PARALLEL_TEST_TO_FINISH]} -ge $MAX_WAIT_TEST ]]; then + kill ${PARALLEL_TESTING_PID[NEXT_PARALLEL_TEST_TO_FINISH]} >&2 2>/dev/null + wait ${PARALLEL_TESTING_PID[NEXT_PARALLEL_TEST_TO_FINISH]} 2>/dev/null # make sure pid terminated, see wait(1p) + "$INTERACTIVE" && echo -en "\r \r" 1>&2 + get_next_message_testing_parallel_result "timeout" + NEXT_PARALLEL_TEST_TO_FINISH+=1 + else + # Here it is okay to check process status every second, since the + # status of only one process is being checked. + if "$INTERACTIVE"; then + echo -en "\r \r" 1>&2 + wait_time=$((curr_time-start_time[NEXT_PARALLEL_TEST_TO_FINISH])) + [[ $wait_time -gt $MAX_WAIT_TEST ]] && wait_time=$MAX_WAIT_TEST + echo -n "Waiting for test #$NEXT_PARALLEL_TEST_TO_FINISH to finish" 1>&2 + if [[ $((MAX_WAIT_TEST-wait_time)) -le 60 ]]; then + echo -n " ($((MAX_WAIT_TEST-wait_time)) seconds to timeout)" 1>&2 + else + echo -n " ($wait_time seconds)" 1>&2 + fi + fi + sleep 1 + curr_time=$(date +%s) + fi + done + return $? +} + + + +# This initializes boolean global do_* variables. 
They keep track of what to do +# -- as the name insinuates +initialize_globals() { + do_allciphers=false + do_vulnerabilities=false + do_beast=false + do_lucky13=false + do_breach=false + do_ccs_injection=false + do_ticketbleed=false + do_robot=false + do_cipher_per_proto=false + do_crime=false + do_freak=false + do_logjam=false + do_drown=false + do_header=false + do_heartbleed=false + do_mx_all_ips=false + do_mass_testing=false + do_logging=false + do_json=false + do_pretty_json=false + do_csv=false + do_html=false + do_pfs=false + do_protocols=false + do_rc4=false + do_grease=false + do_renego=false + do_cipherlists=false + do_server_defaults=false + do_server_preference=false + do_ssl_poodle=false + do_sweet32=false + do_tls_fallback_scsv=false + do_cipher_match=false + do_tls_sockets=false + do_client_simulation=false + do_display_only=false + do_starttls=false +} + + +# Set default scanning options for the boolean global do_* variables. +set_scanning_defaults() { + do_allciphers=true + do_vulnerabilities=true + do_beast=true + do_lucky13=true + do_breach=true + do_heartbleed="$OFFENSIVE" + do_ccs_injection="$OFFENSIVE" + do_ticketbleed="$OFFENSIVE" + do_robot="$OFFENSIVE" + do_crime=true + do_freak=true + do_logjam=true + do_drown=true + do_ssl_poodle=true + do_sweet32=true + do_header=true + do_pfs=true + do_rc4=true + do_protocols=true + do_renego=true + do_cipherlists=true + do_server_defaults=true + do_server_preference=true + do_tls_fallback_scsv=true + do_client_simulation=true + if "$OFFENSIVE"; then + VULN_COUNT=16 + else + VULN_COUNT=12 + fi +} + +# returns number of $do variables set = number of run_funcs() to perform +count_do_variables() { + local gbl + local true_nr=0 + + for gbl in do_allciphers do_vulnerabilities do_beast do_lucky13 do_breach do_ccs_injection do_ticketbleed do_cipher_per_proto do_crime \ + do_freak do_logjam do_drown do_header do_heartbleed do_mx_all_ips do_pfs do_protocols do_rc4 do_grease do_robot do_renego \ + do_cipherlists do_server_defaults do_server_preference do_ssl_poodle do_tls_fallback_scsv \ + do_sweet32 do_client_simulation do_cipher_match do_tls_sockets do_mass_testing do_display_only; do + [[ "${!gbl}" == true ]] && let true_nr++ + done + return $true_nr +} + + +debug_globals() { + local gbl + + for gbl in do_allciphers do_vulnerabilities do_beast do_lucky13 do_breach do_ccs_injection do_ticketbleed do_cipher_per_proto do_crime \ + do_freak do_logjam do_drown do_header do_heartbleed do_mx_all_ips do_pfs do_protocols do_rc4 do_grease do_robot do_renego \ + do_cipherlists do_server_defaults do_server_preference do_ssl_poodle do_tls_fallback_scsv \ + do_sweet32 do_client_simulation do_cipher_match do_tls_sockets do_mass_testing do_display_only; do + printf "%-22s = %s\n" $gbl "${!gbl}" + done + printf "%-22s : %s\n" URI: "$URI" +} + + +# arg1: either switch+value (=) or switch +# arg2: value (if no = provided) +parse_opt_equal_sign() { + if [[ "$1" == *=* ]]; then + echo ${1#*=} + return 1 # = means we don't need to shift args! 
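+          # Typical call pattern throughout parse_cmd_line() below:
+          #   FOO="$(parse_opt_equal_sign "$1" "$2")"; [[ $? -eq 0 ]] && shift
+          # i.e. the caller only shifts when the value was taken from the *next* argument.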
+ else + echo "$2" + return 0 # we need to shift + fi +} + +# Create the command line string for printing purposes +# See https://stackoverflow.com/questions/10835933/preserve-quotes-in-bash-arguments +create_cmd_line_string() { + local arg + local -a allargs=() + local chars='[ !"#$&()*,;<>?\^`{|}]' + + while [[ $# -gt 0 ]]; do + if [[ $1 == *\'* ]]; then + arg=\""$1"\" + elif [[ $1 == *$chars* ]]; then + arg="'$1'" + else + arg="$1" + fi + allargs+=("$arg") # ${allargs[@]} is to be used only for printing + shift + done + printf '%s\n' "${allargs[*]}" +} + +check_base_requirements() { + local binary='' + local whitelist=' hexdump grep awk sed ' + + for binary in 'hexdump' 'dd' 'grep' 'awk' 'tr' 'sed' 'wc' 'date' 'cat' 'ps' 'kill' 'head' 'tail' 'dirname'; do + if ! type -p "${binary}" &> /dev/null; then + fatal "You need to install ${binary} for this program to work" $ERR_RESOURCE + fi + [[ ${whitelist} =~ \ ${binary}\ ]] && continue + "${binary}" --help 2>&1 | grep -iq busybox + if [[ $? -eq 0 ]]; then + fatal "${binary} is from busybox. Please install a regular binary" $ERR_RESOURCE + fi + done +} + +parse_cmd_line() { + local outfile_arg="" + local cipher_mapping + local -i subret=0 + + CMDLINE="$(create_cmd_line_string "${CMDLINE_ARRAY[@]}")" + CMDLINE_PARSED=false + + case $1 in + --help|"") + help 0 + ;; + -b|--banner|-v|--version) + maketempf + get_install_dir + find_openssl_binary + prepare_debug + mybanner + exit $ALLOK + ;; + esac + + # initializing + initialize_globals + + while [[ $# -gt 0 ]]; do + case $1 in + --mx) + do_mx_all_ips=true + PORT=25 + ;; + --mx465) # doesn't work with major ISPs + do_mx_all_ips=true + PORT=465 + ;; + --mx587) # doesn't work with major ISPs + do_mx_all_ips=true + PORT=587 + ;; + --ip|--ip=*) + CMDLINE_IP="$(parse_opt_equal_sign "$1" "$2")" + [[ $? -eq 0 ]] && shift + if [[ "$CMDLINE_IP" == proxy ]]; then + DNS_VIA_PROXY=true + unset CMDLINE_IP + fi + # normalize any IPv6 address + CMDLINE_IP="${CMDLINE_IP//[/}" # fix vim syntax highlighting "] + CMDLINE_IP="${CMDLINE_IP//]/}" + ;; + -n|--nodns|-n=*|--nodns=*) + NODNS="$(parse_opt_equal_sign "$1" "$2")" + [[ $? -eq 0 ]] && shift + if [[ "$NODNS" != none ]] && [[ "$NODNS" != min ]]; then + fatal "Value for nodns switch can be either \"min\" or \"none\"" $ERR_CMDLINE + fi + ;; + -V|-V=*|--local|--local=*) # attention, this could have a value or not! + do_display_only=true + PATTERN2SHOW="$(parse_opt_equal_sign "$1" "$2")" + subret=$? + if [[ "$PATTERN2SHOW" == -* ]]; then + unset PATTERN2SHOW # we hit the next command ==> not our value + else # it was ours, point to next arg + [[ $subret -eq 0 ]] && shift + fi + ;; + -x|-x=*|--single[-_]cipher|--single[-_]cipher=*) + do_cipher_match=true + single_cipher=$(parse_opt_equal_sign "$1" "$2") + [[ $? -eq 0 ]] && shift + ;; + -t|-t=*|--starttls|--starttls=*) + do_starttls=true + STARTTLS_PROTOCOL="$(parse_opt_equal_sign "$1" "$2")" + [[ $? -eq 0 ]] && shift + case $STARTTLS_PROTOCOL in + ftp|smtp|lmtp|pop3|imap|xmpp|telnet|ldap|irc|nntp|postgres|mysql) ;; + ftps|smtps|lmtps|pop3s|imaps|xmpps|telnets|ldaps|ircs|nntps|mysqls) ;; + *) tmln_magenta "\nunrecognized STARTTLS protocol \"$1\", see help" 1>&2 + help 1 ;; + esac + ;; + --xmpphost|--xmpphost=*) + XMPP_HOST=$(parse_opt_equal_sign "$1" "$2") + [[ $? 
-eq 0 ]] && shift + ;; + -e|--each-cipher) + do_allciphers=true + ;; + -E|--cipher-per-proto|--cipher_per_proto) + do_cipher_per_proto=true + ;; + -p|--protocols) + do_protocols=true + ;; + -s|--std|--standard) + do_cipherlists=true + ;; + -S|--server[-_]defaults) + do_server_defaults=true + ;; + -P|--server[_-]preference|--preference) + do_server_preference=true + ;; + -h|--header|--headers) + do_header=true + ;; + -c|--client-simulation) + do_client_simulation=true + ;; + -U|--vulnerable|--vulnerabilities) + do_vulnerabilities=true + do_heartbleed="$OFFENSIVE" + do_ccs_injection="$OFFENSIVE" + do_ticketbleed="$OFFENSIVE" + do_robot="$OFFENSIVE" + do_renego=true + do_crime=true + do_breach=true + do_ssl_poodle=true + do_tls_fallback_scsv=true + do_sweet32=true + do_freak=true + do_drown=true + do_logjam=true + do_beast=true + do_lucky13=true + do_rc4=true + if "$OFFENSIVE"; then + VULN_COUNT=16 + else + VULN_COUNT=12 + fi + ;; + --ids-friendly) + OFFENSIVE=false + ;; + -H|--heartbleed) + do_heartbleed=true + let "VULN_COUNT++" + ;; + -I|--ccs|--ccs[-_]injection) + do_ccs_injection=true + let "VULN_COUNT++" + ;; + -T|--ticketbleed) + do_ticketbleed=true + let "VULN_COUNT++" + ;; + -BB|--robot) + do_robot=true + ;; + -R|--renegotiation) + do_renego=true + let "VULN_COUNT++" + ;; + -C|--compression|--crime) + do_crime=true + let "VULN_COUNT++" + ;; + -B|--breach) + do_breach=true + let "VULN_COUNT++" + ;; + -O|--poodle) + do_ssl_poodle=true + do_tls_fallback_scsv=true + let "VULN_COUNT++" + ;; + -Z|--tls[_-]fallback|tls[_-]fallback[_-]scs) + do_tls_fallback_scsv=true + let "VULN_COUNT++" + ;; + -W|--sweet32) + do_sweet32=true + let "VULN_COUNT++" + ;; + -F|--freak) + do_freak=true + let "VULN_COUNT++" + ;; + -D|--drown) + do_drown=true + let "VULN_COUNT++" + ;; + -J|--logjam) + do_logjam=true + let "VULN_COUNT++" + ;; + -A|--beast) + do_beast=true + let "VULN_COUNT++" + ;; + -L|--lucky13) + do_lucky13=true + let "VULN_COUNT++" + ;; + -4|--rc4|--appelbaum) + do_rc4=true + let "VULN_COUNT++" + ;; + -f|--pfs|--fs|--nsa) + do_pfs=true + ;; + -g|--grease) + do_grease=true + ;; + -9|--full) + set_scanning_defaults + do_allciphers=false + do_cipher_per_proto=true + do_grease=true + ;; + --add-ca|--add-CA|--add-ca=*|--add-CA=*) + ADDITIONAL_CA_FILES="$(parse_opt_equal_sign "$1" "$2")" + [[ $? -eq 0 ]] && shift + ;; + --devel) ### this development feature will soon disappear + # arg1: SSL/TLS protocol (SSLv2=22) + # arg2: list of cipher suites / hostname/ip + # arg3: hostname/ip + HEX_CIPHER="$TLS12_CIPHER" + # DEBUG=3 ./testssl.sh --devel 04 "13,02, 13,01" google.com --> TLS 1.3 + # DEBUG=3 ./testssl.sh --devel 03 "cc, 13, c0, 13" google.de --> TLS 1.2, old CHACHA/POLY + # DEBUG=3 ./testssl.sh --devel 03 "cc,a8, cc,a9, cc,aa, cc,ab, cc,ac" blog.cloudflare.com --> new CHACHA/POLY + # DEBUG=3 ./testssl.sh --devel 01 yandex.ru --> TLS 1.0 + # DEBUG=3 ./testssl.sh --devel 00 + # DEBUG=3 ./testssl.sh --devel 22 + TLS_LOW_BYTE="$2"; + if [[ $# -eq 4 ]]; then # protocol AND ciphers specified + HEX_CIPHER="$3" + shift + fi + shift + do_tls_sockets=true + outln "\nTLS_LOW_BYTE, HEX_CIPHER: \"${TLS_LOW_BYTE}\", \"${HEX_CIPHER}\"" + ;; + --wide) + WIDE=true + ;; + --assuming[_-]http|--assume[-_]http) + ASSUME_HTTP=true + ;; + --sneaky) + SNEAKY=true + ;; + -q|--quiet) + QUIET=true + ;; + --file|--file=*|-iL|-iL=*) + # no shift here as otherwise URI is empty and it bails out + FNAME="$(parse_opt_equal_sign "$1" "$2")" + [[ $? -eq 0 ]] && shift + IKNOW_FNAME=true + WARNINGS=batch # set this implicitly! 
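+               # Each non-empty line of that file is treated as a testssl.sh command line of its own (a literal
+               # 'EOF' line stops processing, see run_mass_testing() above); batch warnings keep the child scans
+               # from prompting interactively.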
+ do_mass_testing=true + ;; + --mode|--mode=*) + MASS_TESTING_MODE="$(parse_opt_equal_sign "$1" "$2")" + [[ $? -eq 0 ]] && shift + case "$MASS_TESTING_MODE" in + serial|parallel) ;; + *) tmln_magenta "\nmass testing mode can be either \"serial\" or \"parallel\"" + help 1 + esac + ;; + --serial) + MASS_TESTING_MODE=serial + ;; + --parallel) + MASS_TESTING_MODE=parallel + ;; + --warnings|--warnings=*) + WARNINGS=$(parse_opt_equal_sign "$1" "$2") + [[ $? -eq 0 ]] && shift + case "$WARNINGS" in + batch|off) ;; + *) tmln_magenta "\nwarnings can be either \"batch\", or \"off\"" + help 1 + esac + ;; + --show[-_]each) + SHOW_EACH_C=true + ;; + --fast) + FAST=true + ;; + --bugs) + BUGS="-bugs" + ;; + --debug|--debug=*) + DEBUG=$(parse_opt_equal_sign "$1" "$2") + [[ $? -eq 0 ]] && shift + case $DEBUG in + [0-6]) ;; + *) tmln_magenta_term "\nunrecognized debug value \"$1\", must be between 0..6" 1>&2 + help 1 + esac + ;; + --color|--color=*) + COLOR="$(parse_opt_equal_sign "$1" "$2")" + [[ $? -eq 0 ]] && shift + case $COLOR in + [0-3]) ;; + *) COLOR=2 + tmln_magenta "\nunrecognized color: \"$1\", must be between 0..3" 1>&2 + help 1 + esac + ;; + --colorblind) + COLORBLIND=true + ;; + --log|--logging) + "$do_logging" && fatal "two --log* arguments" $ERR_CMDLINE + do_logging=true + ;; # DEFINITION of LOGFILE if no arg specified: automagically in parse_hn_port() + # following does the same but additionally we can specify a log location + --logfile|--logfile=*|-oL|-oL=*) + "$do_logging" && fatal "two --log* arguments" $ERR_CMDLINE + LOGFILE="$(parse_opt_equal_sign "$1" "$2")" + [[ $? -eq 0 ]] && shift + do_logging=true + ;; + --json) + "$do_pretty_json" && fatal "flat and pretty JSON output are mutually exclusive" $ERR_CMDLINE + "$do_json" && fatal "--json and --jsonfile are mutually exclusive" $ERR_CMDLINE + if [[ "$2" =~ \.(json|JSON)$ ]]; then + fatal "No file name allowed after \"--json\" (use \"--jsonfile\" instead)" $ERR_CMDLINE + fi + do_json=true + ;; # DEFINITION of JSONFILE is not arg specified: automagically in parse_hn_port() + # following does the same but additionally we can specify a log location + --jsonfile|--jsonfile=*|-oj|-oj=*) + "$do_pretty_json" && fatal "flat and pretty JSON output are mutually exclusive" $ERR_CMDLINE + "$do_json" && fatal "--json and --jsonfile are mutually exclusive" $ERR_CMDLINE + JSONFILE="$(parse_opt_equal_sign "$1" "$2")" + [[ $? -eq 0 ]] && shift + do_json=true + ;; + --json-pretty) + "$do_json" && fatal "flat and pretty JSON output are mutually exclusive" $ERR_CMDLINE + "$do_pretty_json" && fatal "--json-pretty and --jsonfile-pretty are mutually exclusive" $ERR_CMDLINE + if [[ "$2" =~ \.(json|JSON)$ ]]; then + fatal "No file name allowed after \"--json\" (use \"--jsonfile-pretty\" instead)" $ERR_CMDLINE + fi + do_pretty_json=true + ;; + --jsonfile-pretty|--jsonfile-pretty=*|-oJ|-oJ=*) + "$do_json" && fatal "flat and pretty JSON output are mutually exclusive" $ERR_CMDLINE + "$do_pretty_json" && fatal "--json-pretty and --jsonfile-pretty are mutually exclusive" $ERR_CMDLINE + JSONFILE="$(parse_opt_equal_sign "$1" "$2")" + [[ $? -eq 0 ]] && shift + do_pretty_json=true + ;; + --severity|--severity=*) + set_severity_level "$(parse_opt_equal_sign "$1" "$2")" + [[ $? 
-eq 0 ]] && shift + ;; + --hints) + GIVE_HINTS=true + ;; + --csv) + "$do_csv" && fatal "two --csv* arguments" $ERR_CMDLINE + if [[ "$2" =~ \.(csv|CSV)$ ]]; then + fatal "No file name allowed after \"--csv\" (use \"--csvfile\" instead)" $ERR_CMDLINE + fi + do_csv=true + ;; # DEFINITION of CSVFILE is not arg specified: automagically in parse_hn_port() + # following does the same but additionally we can specify a log location + --csvfile|--csvfile=*|-oC|-oC=*) + "$do_csv" && fatal "two --csv* arguments" $ERR_CMDLINE + CSVFILE="$(parse_opt_equal_sign "$1" "$2")" + [[ $? -eq 0 ]] && shift + do_csv=true + ;; + --html) + "$do_html" && fatal "two --html* arguments" $ERR_CMDLINE + if [[ "$2" =~ \.(htm|html|HTM|HTML)$ ]]; then + fatal "No file name allowed after \"--html\" (use \"--htmlfile\" instead)" $ERR_CMDLINE + fi + do_html=true + ;; # DEFINITION of HTMLFILE is not arg specified: automagically in parse_hn_port() + # following does the same but additionally we can specify a file location + --htmlfile|--htmlfile=*|-oH|-oH=*) + "$do_html" && fatal "two --html* arguments" $ERR_CMDLINE + HTMLFILE="$(parse_opt_equal_sign "$1" "$2")" + [[ $? -eq 0 ]] && shift + do_html=true + ;; + --outfile|--outfile=*|-oa|-oa=*) + ( "$do_html" || "$do_json" || "$do_pretty_json" || "$do_csv" || "$do_logging" ) && fatal "check your arguments four multiple file output options" $ERR_CMDLINE + outfile_arg="$(parse_opt_equal_sign "$1" "$2")" + if [[ "$outfile_arg" != "auto" ]]; then + if [[ -d "$outfile_arg" ]]; then + HTMLFILE="$outfile_arg" + CSVFILE="$outfile_arg" + JSONFILE="$outfile_arg" + LOGFILE="$outfile_arg" + else + HTMLFILE="$outfile_arg.html" + CSVFILE="$outfile_arg.csv" + JSONFILE="$outfile_arg.json" + LOGFILE="$outfile_arg.log" + fi + fi + [[ $? -eq 0 ]] && shift + do_html=true + do_json=true + do_csv=true + do_logging=true + ;; + --outFile|--outFile=*|-oA|-oA=*) + ( "$do_html" || "$do_json" || "$do_pretty_json" || "$do_csv" || "$do_logging" ) && fatal "check your arguments four multiple file output options" $ERR_CMDLINE + outfile_arg="$(parse_opt_equal_sign "$1" "$2")" + if [[ "$outfile_arg" != "auto" ]]; then + if [[ -d "$outfile_arg" ]]; then + HTMLFILE="$outfile_arg" + CSVFILE="$outfile_arg" + JSONFILE="$outfile_arg" + LOGFILE="$outfile_arg" + else + HTMLFILE="$outfile_arg.html" + CSVFILE="$outfile_arg.csv" + JSONFILE="$outfile_arg.json" + LOGFILE="$outfile_arg.log" + fi + fi + [[ $? -eq 0 ]] && shift + do_html=true + do_pretty_json=true + do_csv=true + do_logging=true + ;; + --append) + APPEND=true + ;; + --outprefix) + FNAME_PREFIX="$(parse_opt_equal_sign "$1" "$2")" + if [[ $? -eq 0 ]]; then + shift + case "$(get_last_char "$FNAME_PREFIX")" in + '.') ;; + '-') ;; + '_') ;; + *) FNAME_PREFIX="${FNAME_PREFIX}-" ;; + esac + fi + ;; + --openssl|--openssl=*) + OPENSSL="$(parse_opt_equal_sign "$1" "$2")" + [[ $? -eq 0 ]] && shift + ;; + --openssl-timeout|--openssl-timeout=*) + OPENSSL_TIMEOUT="$(parse_opt_equal_sign "$1" "$2")" + [[ $? -eq 0 ]] && shift + ;; + --connect-timeout|--connect-timeout=*) + CONNECT_TIMEOUT="$(parse_opt_equal_sign "$1" "$2")" + [[ $? -eq 0 ]] && shift + ;; + --mapping|--mapping=*) + cipher_mapping="$(parse_opt_equal_sign "$1" "$2")" + [[ $? 
-eq 0 ]] && shift + case "$cipher_mapping" in + no-openssl) DISPLAY_CIPHERNAMES="rfc-only" ;; + no-rfc|no-iana) DISPLAY_CIPHERNAMES="openssl-only" ;; + openssl) DISPLAY_CIPHERNAMES="openssl" ;; + rfc|iana) DISPLAY_CIPHERNAMES="rfc" ;; + *) tmln_warning "\nmapping can only be \"no-openssl\", \"no-iana\"(\"no-rfc\"), \"openssl\" or \"iana\"(\"rfc\")" + help 1 ;; + esac + ;; + --proxy|--proxy=*) + PROXY="$(parse_opt_equal_sign "$1" "$2")" + [[ $? -eq 0 ]] && shift + ;; + --phone-out) + PHONE_OUT=true + ;; + -6) # doesn't work automagically. My versions have -DOPENSSL_USE_IPV6, CentOS/RHEL/FC do not + HAS_IPv6=true + ;; + --has[-_]dhbits|--has[_-]dh[-_]bits) + # Should work automagically. Helper switch for CentOS,RHEL+FC w openssl server temp key backport (version 1.0.1), see #190 + HAS_DH_BITS=true + ;; + --ssl_native|--ssl-native) + SSL_NATIVE=true + ;; + --basicauth|--basicauth=*) + BASICAUTH="$(parse_opt_equal_sign "$1" "$2")" + [[ $? -eq 0 ]] && shift + ;; + (--) shift + break + ;; + (-*) tmln_warning "0: unrecognized option \"$1\"" 1>&2; + help 1 + ;; + (*) break + ;; + esac + shift + done + + # Show usage if no further options were specified + if [[ -z "$1" ]] && [[ -z "$FNAME" ]] && ! "$do_display_only"; then + fatal "URI missing" $ERR_CMDLINE + else + # What is left here should be the URI. + URI="$1" + [[ -n "$2" ]] && fatal "URI comes last" $ERR_CMDLINE + fi + + # Now spot some incompatibilities in cmdlines + [[ $CMDLINE_IP == one ]] && [[ "$NODNS" == none ]] && fatal "\"--ip=one\" and \"--nodns=none\" don't work together" $ERR_CMDLINE + [[ $CMDLINE_IP == one ]] && ( is_ipv4addr "$URI" || is_ipv6addr "$URI" ) && fatal "\"--ip=one\" plus supplying an IP address doesn't work" $ERR_CMDLINE + "$do_mx_all_ips" && [[ "$NODNS" == none ]] && fatal "\"--mx\" and \"--nodns=none\" don't work together" $ERR_CMDLINE + [[ -n "$CONNECT_TIMEOUT" ]] && [[ "$MASS_TESTING_MODE" == parallel ]] && fatal "Parallel mass scanning and specifying connect timeouts currently don't work together" $ERR_CMDLINE + + ADDITIONAL_CA_FILES="${ADDITIONAL_CA_FILES//,/ }" + for fname in $ADDITIONAL_CA_FILES; do + [[ -s "$fname" ]] || fatal "CA file \"$fname\" does not exist" $ERR_RESOURCE + grep -q "BEGIN CERTIFICATE" "$fname" || fatal "\"$fname\" is not CA file in PEM format" $ERR_RESOURCE + done + + [[ "$DEBUG" -ge 5 ]] && debug_globals + + count_do_variables + [[ $? -eq 0 ]] && set_scanning_defaults + CMDLINE_PARSED=true +} + + +# connect call from openssl needs ipv6 in square brackets +nodeip_to_proper_ip6() { + local len_nodeip=0 + + if is_ipv6addr $NODEIP; then + ${UNBRACKTD_IPV6} || NODEIP="[$NODEIP]" + len_nodeip=${#NODEIP} + CORRECT_SPACES="$(printf -- " "'%.s' $(eval "echo {1.."$((len_nodeip - 17))"}"))" + # IPv6 addresses are longer, this variable takes care that "further IP" and "Service" is properly aligned + fi +} + + +reset_hostdepended_vars() { + TLS_EXTENSIONS="" + PROTOS_OFFERED="" + CURVES_OFFERED="" + OPTIMAL_PROTO="" + ALL_FAILED_SOCKETS=true + SERVER_SIZE_LIMIT_BUG=false +} + +# Rough estimate, in the future we maybe want to make use of nano secs (%N). Note this +# is for performance debugging purposes (MEASURE_TIME=yes), eye candy is not important. 
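+# Illustrative invocation (file name and host are examples only):
+#   MEASURE_TIME=true MEASURE_TIME_FILE=/tmp/testssl-times.txt ./testssl.sh example.com
+# Each stopwatch call then prints the elapsed seconds right-aligned and, since the file
+# exists, appends one "<label> : <seconds>" line to MEASURE_TIME_FILE.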
+# +stopwatch() { + local new_delta + local column=$((COLUMNS - 0)) # for future adjustments + + "$MEASURE_TIME" || return + new_delta=$(( $(date +%s) - LAST_TIME )) + printf "%${column}s" "$1: $new_delta" + [[ -e "$MEASURE_TIME_FILE" ]] && echo "$1 : $new_delta " >> "$MEASURE_TIME_FILE" + LAST_TIME=$(( new_delta + LAST_TIME )) +} + + +# arg1(optional): "init" --> just initializing. Or: STARTTLS protocol +lets_roll() { + local -i ret=0 + local section_number=0 + + if [[ "$1" == init ]]; then + # called once upfront to be able to measure preparation time b4 everything starts + START_TIME=$(date +%s) + LAST_TIME=$START_TIME + [[ -n "$MEASURE_TIME_FILE" ]] && >"$MEASURE_TIME_FILE" + return 0 + fi + stopwatch initialized + + [[ -z "$NODEIP" ]] && fatal "$NODE doesn't resolve to an IP address" $ERR_DNSLOOKUP + nodeip_to_proper_ip6 + reset_hostdepended_vars + determine_rdns # Returns always zero or has already exited if fatal error occurred + stopwatch determine_rdns + + ((SERVER_COUNTER++)) + datebanner " Start" + determine_service "$1" # STARTTLS service? Other will be determined here too. Returns 0 if test connect was ok or has already exited if fatal error occurred + # determine_service() can return 1, it indicates that this IP cannot be reached but there are more IPs to check + if [[ $? -eq 0 ]] ; then + # "secret" devel options --devel: + if "$do_tls_sockets"; then + if [[ "$TLS_LOW_BYTE" == 22 ]]; then + sslv2_sockets "" "true" + else + if [[ "$TLS_LOW_BYTE" == 04 ]]; then + if "$CERT_COMPRESSION"; then + # See PR #1279 + [[ $DEBUG -eq 3 ]] && tmln_out "including TLS extension certificate compression" + tls_sockets "$TLS_LOW_BYTE" "$HEX_CIPHER" "all+" "00,1b, 00,03, 02, 00,01" + else + tls_sockets "$TLS_LOW_BYTE" "$HEX_CIPHER" "ephemeralkey" + fi + else + tls_sockets "$TLS_LOW_BYTE" "$HEX_CIPHER" "all" + fi + fi + echo $? + exit $ALLOK; + fi + if "$do_cipher_match"; then + # we will have an invalid JSON with no if statement + fileout_section_header $section_number false + run_cipher_match ${single_cipher} + stopwatch run_cipher_match + else + fileout_section_header $section_number false && ((section_number++)) + determine_sizelimitbug + fileout_section_footer false + + ((section_number++)) + # all top level functions now following have the prefix "run_" + fileout_section_header $section_number false && ((section_number++)) + "$do_protocols" && { + run_protocols; ret=$(($? + ret)); stopwatch run_protocols; + run_npn; ret=$(($? + ret)); stopwatch run_npn; + run_alpn; ret=$(($? + ret)); stopwatch run_alpn; + } + fileout_section_header $section_number true && ((section_number++)) + "$do_grease" && { run_grease; ret=$(($? + ret)); stopwatch run_grease; } + + fileout_section_header $section_number true && ((section_number++)) + "$do_cipherlists" && { run_cipherlists; ret=$(($? + ret)); stopwatch run_cipherlists; } + + fileout_section_header $section_number true && ((section_number++)) + "$do_pfs" && { run_pfs; ret=$(($? + ret)); stopwatch run_pfs; } + + fileout_section_header $section_number true && ((section_number++)) + "$do_server_preference" && { run_server_preference; ret=$(($? + ret)); stopwatch run_server_preference; } + + fileout_section_header $section_number true && ((section_number++)) + "$do_server_defaults" && { run_server_defaults; ret=$(($? 
+ ret)); stopwatch run_server_defaults; } + + if "$do_header"; then + #TODO: refactor this into functions + fileout_section_header $section_number true && ((section_number++)) + if [[ $SERVICE == HTTP ]]; then + run_http_header "$URL_PATH"; ret=$(($? + ret)) + run_http_date "$URL_PATH"; ret=$(($? + ret)) + run_hsts "$URL_PATH"; ret=$(($? + ret)) + run_hpkp "$URL_PATH"; ret=$(($? + ret)) + run_server_banner "$URL_PATH"; ret=$(($? + ret)) + run_appl_banner "$URL_PATH"; ret=$(($? + ret)) + run_cookie_flags "$URL_PATH"; ret=$(($? + ret)) + run_security_headers "$URL_PATH"; ret=$(($? + ret)) + run_rp_banner "$URL_PATH"; ret=$(($? + ret)) + stopwatch do_header + fi + else + ((section_number++)) + fi + + # vulnerabilities + if [[ $VULN_COUNT -gt $VULN_THRESHLD ]] || "$do_vulnerabilities"; then + outln; pr_headlineln " Testing vulnerabilities " + outln + fi + + fileout_section_header $section_number true && ((section_number++)) + "$do_heartbleed" && { run_heartbleed; ret=$(($? + ret)); stopwatch run_heartbleed; } + "$do_ccs_injection" && { run_ccs_injection; ret=$(($? + ret)); stopwatch run_ccs_injection; } + "$do_ticketbleed" && { run_ticketbleed; ret=$(($? + ret)); stopwatch run_ticketbleed; } + "$do_robot" && { run_robot; ret=$(($? + ret)); stopwatch run_robot; } + "$do_renego" && { run_renego; ret=$(($? + ret)); stopwatch run_renego; } + "$do_crime" && { run_crime; ret=$(($? + ret)); stopwatch run_crime; } + "$do_breach" && { run_breach "$URL_PATH" ; ret=$(($? + ret)); stopwatch run_breach; } + "$do_ssl_poodle" && { run_ssl_poodle; ret=$(($? + ret)); stopwatch run_ssl_poodle; } + "$do_tls_fallback_scsv" && { run_tls_fallback_scsv; ret=$(($? + ret)); stopwatch run_tls_fallback_scsv; } + "$do_sweet32" && { run_sweet32; ret=$(($? + ret)); stopwatch run_sweet32; } + "$do_freak" && { run_freak; ret=$(($? + ret)); stopwatch run_freak; } + "$do_drown" && { run_drown; ret=$(($? + ret)); stopwatch run_drown; } + "$do_logjam" && { run_logjam; ret=$(($? + ret)); stopwatch run_logjam; } + "$do_beast" && { run_beast; ret=$(($? + ret)); stopwatch run_beast; } + "$do_lucky13" && { run_lucky13; ret=$(($? + ret)); stopwatch run_lucky13; } + "$do_rc4" && { run_rc4; ret=$(($? + ret)); stopwatch run_rc4; } + + fileout_section_header $section_number true && ((section_number++)) + "$do_allciphers" && { run_allciphers; ret=$(($? + ret)); stopwatch run_allciphers; } + "$do_cipher_per_proto" && { run_cipher_per_proto; ret=$(($? + ret)); stopwatch run_cipher_per_proto; } + + fileout_section_header $section_number true && ((section_number++)) + "$do_client_simulation" && { run_client_simulation; ret=$(($? + ret)); stopwatch run_client_simulation; } + fi + fileout_section_footer true + fi + + outln + calc_scantime + datebanner " Done" + + # reset the failed connect counter as we are finished + NR_SOCKET_FAIL=0 + NR_OSSL_FAIL=0 + + "$MEASURE_TIME" && printf "$1: %${COLUMNS}s\n" "$SCAN_TIME" + [[ -e "$MEASURE_TIME_FILE" ]] && echo "Total : $SCAN_TIME " >> "$MEASURE_TIME_FILE" + + return $ret +} + + + diff --git a/overlay/Linux/usr/local/bin/python.sh b/overlay/Linux/usr/local/bin/python.sh new file mode 100755 index 0000000..bde2581 --- /dev/null +++ b/overlay/Linux/usr/local/bin/python.sh @@ -0,0 +1,60 @@ +#!/bin/sh +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- +[ -z "$PYVER" ] && PYVER=3 +export PYVER +#[ -f /usr/local/bin/usr_local_tput.bash ] && \ +# . /usr/local/bin/usr_local_tput.bash +ROLE=base +declare -a RARGS +RARGS=("$@") + +[ -f /usr/local/bin/pyver.sh ] && . 
/usr/local/bin/pyver.sh || { + [ -f /usr/local/etc/testforge/testforge.bash ] && \ + . /usr/local/etc/testforge/testforge.bash >/dev/null + + P="BASE_PYTHON${PYVER}_MINOR" + PYTHON_MINOR="$(eval echo \$$P)" + [ -n "$PYTHON_MINOR" ] || \ + PYTHON_MINOR=$( python$PYVER --version 2>&1| sed -e 's@^.* @@' -e 's@\.[0-9]*$@@' ) + + if [ -z "$LIB" -a -d /usr/lib/python$PYTHON_MINOR ] ; then + LIB=lib + elif [ -z "$LIB" -a -d /usr/lib64/python$PYTHON_MINOR ] ; then + LIB=lib64 + elif [ -n "$LIB" -a ! -d /usr/$LIB/python$PYTHON_MINOR ] ; then + ERROR LIB=$LIB but no /usr/$LIB/python$PYTHON_MINOR >&2 ; exit 1 + fi + } + +if [ -z "$PYTHONPATH" ] ; then + # sic - failsafe + export PYTHONPATH=/usr/lib/python$PYTHON_MINOR/site-packages +fi + +if [ -d /usr/$LIB/python$PYTHON_MINOR/site-packages/llvmlite/binding ] ; then + if [ -z "$LD_LIBRARY_PATH" ] ; then + export LD_LIBRARY_PATH=/usr/$LIB/python$PYTHON_MINOR/site-packages/llvmlite/binding + else + export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/$LIB/python$PYTHON_MINOR/site-packages/llvmlite/binding + fi +fi + +# do I want $HOME/.local on the path? - no +# do I want local/lib/.../dist-packages on the path? - no is already is +# on Debian ~/.local/lib/python*/site-packages is already on the path +for elt in usr/local ; do + [ -d /$elt ] || continue + [ -d /$elt/bin ] && [[ ! $PATH =~ /$elt/bin ]] && \ + export PATH=$PATH:/$elt/bin + [ -e /$elt/$LIB ] || continue + export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/$elt/$LIB + [ -d /$elt/$LIB/python$PYTHON_MINOR/site-packages ] || \ + mkdir /$elt/$LIB/python$PYTHON_MINOR/site-packages + [ ! -f /$elt/$LIB/python$PYTHON_MINOR/site-packages/__init__.py ] && \ + touch /$elt/$LIB/python$PYTHON_MINOR/site-packages/__init__.py + [[ ! $PYTHONPATH =~ /$elt/$LIB/python$PYTHON_MINOR/site-packages ]] && \ + export PYTHONPATH=$PYTHONPATH:/$elt/$LIB/python$PYTHON_MINOR/site-packages + done + +# echo INFO exec /usr/bin/python$PYTHON_MINOR -W ignore::DeprecationWarning "${RARGS[@]}" +/usr/bin/python$PYTHON_MINOR -W ignore::DeprecationWarning "${RARGS[@]}" diff --git a/overlay/Linux/usr/local/bin/python2.sh b/overlay/Linux/usr/local/bin/python2.sh new file mode 100755 index 0000000..9217d3d --- /dev/null +++ b/overlay/Linux/usr/local/bin/python2.sh @@ -0,0 +1,5 @@ +#!/bin/bash +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- +ROLE=bash +export PYVER=2 +exec /usr/local/bin/python.sh "$@" diff --git a/overlay/Linux/usr/local/bin/python3.sh b/overlay/Linux/usr/local/bin/python3.sh new file mode 100755 index 0000000..e8d7685 --- /dev/null +++ b/overlay/Linux/usr/local/bin/python3.sh @@ -0,0 +1,5 @@ +#!/bin/bash +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- +ROLE=bash +export PYVER=3 +/usr/local/bin/python.sh "$@" diff --git a/overlay/Linux/usr/local/bin/pyver.sh b/overlay/Linux/usr/local/bin/pyver.sh new file mode 100755 index 0000000..a79d93d --- /dev/null +++ b/overlay/Linux/usr/local/bin/pyver.sh @@ -0,0 +1,117 @@ +#!/bin/sh +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- + +DBUG() { echo DEBUG $* >&2 ; } +INFO() { echo INFO $* >&2 ; } +WARN() { echo WARN $* >&2 ; } +ERROR() { echo ERROR $* >&2 ; } + +prog=`basename $0 .bash` +PREFIX=/usr/local +ROLE=base + +[ -z "$PYVER" ] && PYVER=3 # echo ERROR define PYVER >&2 && exit 1 +[ -z "$USER" ] && USER=$( id -un ) + +ini_file=/usr/local/etc/testforge/testforge.bash +if [ ! 
-f $ini_file ] ; then + # bootstrap + [ -d /usr/local/etc/testforge ] || mkdir -p /usr/local/etc/testforge + [ -x /usr/bin/python$PYVER ] && \ + echo export BASE_PYTHON${PYVER}_MINOR=`/usr/bin/python$PYVER --version|sed -e 's/.* //' -e 's/\.[0-9]*$//'` >> $ini_file +else + . $ini_file >/dev/null +fi + +set -- -x +P="BASE_PYTHON${PYVER}_MINOR" +PYTHON_MINOR="$(eval echo \$$P)" +[ -n "$PYTHON_MINOR" ] || \ + PYTHON_MINOR=$( python$PYVER --version 2>&1| sed -e 's@^.* @@' -e 's@\.[0-9]*$@@' ) + +if [ -z "$LIB" -a -d /usr/lib/python$PYTHON_MINOR ] ; then + LIB=lib +elif [ -z "$LIB" -a -d /usr/lib64/python$PYTHON_MINOR ] ; then + LIB=lib64 +elif [ -n "$LIB" -a ! -d /usr/$LIB/python$PYTHON_MINOR ] ; then + ERROR LIB=$LIB but no /usr/$LIB/python$PYTHON_MINOR + exit 1 +fi + +if [ "$USER" = root ] ; then + [ -f /usr/$LIB/python$PYTHON_MINOR/sitecustomize.py ] && \ + mv /usr/$LIB/python$PYTHON_MINOR/sitecustomize.py /usr/$LIB/python$PYTHON_MINOR/sitecustomize.py.bak && \ + rm -f /usr/$LIB/python$PYTHON_MINOR/sitecustomize.pyc +fi + +if [ ! -d /usr/local/$LIB/python$PYTHON_MINOR/site-packages/ ] ; then + if [ "$USER" = root ] ; then + mkdir -p /usr/local/$LIB/python$PYTHON_MINOR/site-packages/ + chgrp adm /usr/local/$LIB/python$PYTHON_MINOR/site-packages/ + chmod 775 /usr/local/$LIB/python$PYTHON_MINOR/site-packages/ + else + ERROR Install error missing /usr/local/$LIB/python$PYTHON_MINOR/site-packages/ + exit 2 + fi +fi + +[ -d /usr/local/$LIB/python$PYTHON_MINOR/site-packages/ ] || \ + mkdir -p /usr/local/$LIB/python$PYTHON_MINOR/site-packages/ +[ -f /usr/local/$LIB/python$PYTHON_MINOR/site-packages/sitecustomize.py ] || \ + cat > /usr/local/$LIB/python$PYTHON_MINOR/site-packages/sitecustomize.py << EOF +# -*- mode: python; indent-tabs-mode: nil; py-indent-offset: 4; coding: utf-8 -*- + +from __future__ import print_function + +import codecs +codecs._codecs_lookup = codecs.lookup +def lookup(s): + if s.endswith('-unix'): + s = s[:-5] + elif s.endswith('-dos'): + s = s[:-4] + return codecs._codecs_lookup(s) +codecs.lookup = lookup + +import os,sys +pyver = sys.version[:3] +notver = "3" if sys.version[:1] == '2' else '2' + +for elt in sys.path: + if elt.find('python' + notver) < 0: continue + p = os.environ.get('PYTHONPATH', '') + sys.stderr.write('WARN: sitecustomize.py PYTHONPATH=' +p +' sys.path=' +repr(sys.path) +'\n') + sys.stderr.write('"python' + notver +' in sys.path for ' +sys.executable +"\n") + raise RuntimeError('"python' + notver +' in sys.path for ' +sys.executable) + +dir=None +for elt in ['var', 'usr']: + if 'LD_LIBRARY_PATH' not in os.environ or 'PYTHONPATH' not in os.environ: + continue + dir = '/' + elt + '/local/bin' + if dir not in os.environ['PATH'].split(os.pathsep): + continue + dir = '/' + elt + "/local/$LIB" + if dir not in os.environ['LD_LIBRARY_PATH'].split(os.pathsep): + continue + dir = '/' + elt + "/local/$LIB/python" + pyver + '/site-packages' + # the bash wrapper will have put this on + if dir in os.environ['PYTHONPATH'].split(os.pathsep): + # print(repr(sys.path)) + if dir not in sys.path: + sys.path.insert(0, dir) + bin = '/' + elt + '/local/bin/python' + pyver[0] + if elt == 'var': + bin += '.bash' + else: + bin += '.sh' + if os.path.isfile(bin): + # print(sys.executable + '=' + bin) + sys.executable = bin + # var takes precedence + break + +if __name__ == '__main__': + print(sys.executable) +del os, sys, dir, elt, pyver +EOF diff --git a/overlay/Linux/usr/local/bin/testforge_get_inventory.bash b/overlay/Linux/usr/local/bin/testforge_get_inventory.bash new file mode 
100755 index 0000000..8ef49e9 --- /dev/null +++ b/overlay/Linux/usr/local/bin/testforge_get_inventory.bash @@ -0,0 +1,36 @@ +#!/bin/sh +# -*- mode: sh; tab-width: 8; encoding: utf-8-unix -*- + +# on stdout - messages on stderr + +prog=`basename $0 .bash` +PREFIX=/usr/local +ROLE=base +base=AnsI + +# quiet +[ "$#" -eq 0 ] && exit 1 +VARIABLE=$1 + +[ -f $PREFIX/etc/testforge/testforge.bash ] && . $PREFIX/etc/testforge/testforge.bash + +[ -n "$TESTFORGE_ANSIBLE_SRC" ] || TESTFORGE_ANSIBLE_SRC=/g/TestForge/src/ansible + +name=`hostname` + +if [ -d "$TESTFORGE_ANSIBLE_SRC" ] && [ -f $TESTFORGE_ANSIBLE_SRC/hosts.yml ] ; then + base=$name + ansible-inventory -i $TESTFORGE_ANSIBLE_SRC/hosts.yml \ + --playbook-dir=$TESTFORGE_ANSIBLE_SRC \ + --host=$base >> /tmp/${AnsI}$$.json 2> /tmp/${AnsI}$$.err + if [ $? -eq 0 -a -f /tmp/${AnsI}$$.json ] ; then + #!? export + VALUE=`jq .$VARIABLE &2 "DEBUG: $prog base=$base VALUE=$VALUE" + [ "$VALUE" = "null" ] && VALUE="" + echo -n "$VALUE" + fi + rm -f /tmp/${AnsI}$$.json +fi + +exit 0 diff --git a/overlay/Linux/usr/local/bin/testforge_local_bin.bash b/overlay/Linux/usr/local/bin/testforge_local_bin.bash new file mode 100755 index 0000000..e968bbc --- /dev/null +++ b/overlay/Linux/usr/local/bin/testforge_local_bin.bash @@ -0,0 +1,39 @@ +#!/bin/sh +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- + +prog=$( basename $0 .bash ) +PREFIX=/usr/local +ROLE=base +. /usr/local/bin/usr_local_base.bash || exit 2 + +umask 0022 +[ "$#" -gt 0 ] && inidir=$1 || inidir=/usr/local/etc/testforge +[ -f $inidir ] || mkdir -p $inidir + +if [ -f $inidir ] ; then + inifile=$inidir + else + inifile=$inidir/testforge.ini + fi + +# echo -n "DEBUG: $prog "; ls -l $inifile +[ -e $inifile ] || { ERROR no file $inifile ; exit 1 ; } +[ -s $inifile ] || { ERROR empty file $inifile ; exit 2 ; } + +bashfile=$( echo $inifile | sed -e 's/.ini$/.bash/' ) +if [ ! -s $bashfile ] || [ $inifile -nt $bashfile ] ; then + INFO "$inifile > $bashfile" + /usr/local/bin/fact_to_bash.bash < $inifile > $bashfile || exit 3 + echo 'export PATH=$PATH:/sbin:/usr/local/bin:/var/local/bin' >> $bashfile + echo -n "DEBUG: $prog bashfile"; ls -l $bashfile +fi + +ymlfile=$( echo $inifile | sed -e 's/.ini$/.yml/' ) +if [ ! -s $ymlfile ] || [ $inifile -nt $ymlfile ] ; then + INFO "$inifile > $ymlfile" + /usr/local/bin/fact_to_yaml.bash < $inifile > $ymlfile || exit 4 + echo -n "DEBUG: $prog ymlfile "; ls -l $ymlfile + fi +. $bashfile || exit $? + +exec bash /usr/local/bin/base_sheebang_after_pip.bash diff --git a/overlay/Linux/usr/local/bin/testforge_sheebang_after_pip.bash b/overlay/Linux/usr/local/bin/testforge_sheebang_after_pip.bash new file mode 100755 index 0000000..b02df96 --- /dev/null +++ b/overlay/Linux/usr/local/bin/testforge_sheebang_after_pip.bash @@ -0,0 +1,60 @@ +#!/bin/sh +# -*-mode: sh; tab-width: 8; coding: utf-8-unix -*- + +. /usr/local/bin/usr_local_base.bash || exit 2 +PREFIX=/usr/local +ROLE=base + +[ -z "$BASE_PYTHON2_MINOR" ] && \ + BASE_PYTHON2_MINOR=$( python2 --version 2>&1| sed -e 's@^.* @@' -e 's@\.[0-9]*$@@' ) +[ -z "$BASE_PYTHON3_MINOR" ] && \ + BASE_PYTHON3_MINOR=$( python3 --version 2>&1| sed -e 's@^.* @@' -e 's@\.[0-9]*$@@' ) + +for PYTHON_MINOR in "$BASE_PYTHON2_MINOR" "$BASE_PYTHON3_MINOR" ; do + [ -z "$PYTHON_MINOR" ] && continue +if [ -z "$LIB" -a -d /usr/lib/python$PYTHON_MINOR/site-packages ] ; then + LIB=lib +elif [ -z "$LIB" -a -d /usr/lib64/python$PYTHON_MINOR/site-packages ] ; then + LIB=lib64 +elif [ -n "$LIB" -a ! 
-d /usr/$LIB/python$PYTHON_MINOR/site-packages ] ; then + ERROR LIB=$LIB but no /usr/$LIB/python$PYTHON_MINOR/site-packages +fi +done + +umask 0022 +# [ "$#" -eq 0 ] && set -- $PREFIX/bin + +# FixMe? /usr/local/bin too? I think not, except for ours? + +for prefix in /usr/local /var/local ; do + cd $prefix/bin || exit 1 + #? ls -1d * | grep -v '~' | xargs file | grep -i python | sed -e 's/:.*//'|while read file ; do + ls -1 | grep -v '~' | xargs file | grep script | sed -e 's/:.*//' | \ + while read file ; do + head -1 $file | grep -q python || continue + head -1 $file | grep -q $prefix/python..bash && continue + base=$( echo $file | sed -e 's/\.bash$//' ) + under=$( echo $prefix | sed -e 's/^.//' -e 's@/@_@g' ) + if [ -h /etc/python-exec/$base.conf ] ; then + link=$( readlink /etc/python-exec/$base.conf ) + if [ "$link" = python2.conf ] ; then + sed -f $prefix/share/sed/${under}_python2.sed -i $file + else + sed -f $prefix/share/sed/${under}_python3.sed -i $file + fi + else + sed -f $prefix/share/sed/${under}_python2.sed -i $file + sed -f $prefix/share/sed/${under}_python3.sed -i $file + fi + # echo $file + done + + # failsafe - Eberly - no longer active + for elt in $BASE_PYTHON2_MINOR $BASE_PYTHON3_MINOR ; do + [ -f $prefix/${LIB}/python$elt/site-packages/site.py ] + # WARN missing $prefix/${LIB}/python$elt/site-packages/site.py + done + +done + +exit 0 diff --git a/overlay/Linux/usr/local/bin/toxcore_create-vm.bash b/overlay/Linux/usr/local/bin/toxcore_create-vm.bash new file mode 100755 index 0000000..495597b --- /dev/null +++ b/overlay/Linux/usr/local/bin/toxcore_create-vm.bash @@ -0,0 +1,425 @@ +#!/bin/bash +# -*- mode: sh; fill-column: 75; tab-width: 8; coding: utf-8-unix -*- +# from https://github.com/earlruby/create-vm/ + +[ -f /usr/local/bin/usr_local_tput.bash ] && \ + . /usr/local/bin/usr_local_tput.bash || { + DBUG() { echo DEBUG $* ; } + INFO() { echo INFO $* ; } + WARN() { echo WARN $* ; } + ERROR() { echo ERROR $* ; } + } + +prog=`basename $0 .bash` +PREFIX=/usr/local +ROLE=toxcore +export PATH=$PATH:$PREFIX/bin +have_genisoimage=true + +# create-vm - Quickly create guest VMs using cloud image files and cloud-init. + +# Copyright 2018-2023 Earl C. Ruby III +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and + +# Set BOX_NBD_OVERLAY_DIR environment variable to override default storage location for VMs +HOSTNAME= +IMG_FQN= +AUTH_KEYS_FQN= +RAM=2048 +VCPUS=1 +STORAGE=20 +BRIDGE=virbr1 +MAC='52:54:00:1d:9c:6f' +VERBOSE= +PASS= +OSINFO=gentoo +password=ansible +OUTDIR=${BOX_NBD_OVERLAY_DIRs:-"${HOME}/vms/virsh"} + + +usage() +{ +cat << EOF +usage: $0 options + +Quickly create guest VMs using cloud image files and cloud-init. 
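+
+Example (all values are illustrative):
+  $0 -n gentoo6 -i /var/lib/libvirt/images/gentoo-cloud.qcow2 -k /root/.ssh/id_rsa.pub -r 4096 -c 2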
+ +OPTIONS: + -h Show this message + -n Host name (required) + -i Full path and name of the base .img file to use (required) + -k Full path and name of the ansible user's public key file (required) + -d Output directory for the overlay qcow2 and related files + -r RAM in MB (defaults to ${RAM}) + -c Number of VCPUs (defaults to ${VCPUS}) + -s Amount of storage to allocate in GB (defaults to ${STORAGE}) + -b Bridge interface to use (defaults to ${BRIDGE}) + -m MAC address to use (default is to use a randomly-generated MAC) + -o OSINFO name like win11, win10, fedora32, gentoo, ububtu20 + -p ansible users plaintext password + -v Verbose +EOF +} + +while getopts "h:n:i:k:r:c:s:b:m:o:p:d:v" option; do + case "${option}" + in + h) + usage + exit 0 + ;; + n) HOSTNAME=${OPTARG};; + i) IMG_FQN=${OPTARG};; + k) AUTH_KEYS_FQN=${OPTARG};; + r) RAM=${OPTARG};; + c) VCPUS=${OPTARG};; + s) STORAGE=${OPTARG};; + b) BRIDGE=${OPTARG};; + m) MAC=${OPTARG};; + p) PASS=${OPTARG};; + o) password=${OPTARG};; + d) OUTDIR=${OPTARG}; + BOX_NBD_OVERLAY_DIR=${OUTDIR};; + v) VERBOSE=1;; + *) + ERROR unhandled option "${option}" ${OPTARG} + usage + exit 1 + ;; + esac +done + +if [[ -z $HOSTNAME ]]; then + ERROR "Host name is required" + usage + exit 1 +fi + +if [[ -z $IMG_FQN ]]; then + ERROR "Base cloud image file name is required" + usage + exit 1 +fi + +if [[ -z $BOX_NBD_OVERLAY_DIR ]]; then + ERROR "Output image directory is required BOX_NBD_OVERLAY_DIR" + usage + exit 1 +fi + +if [[ -z $AUTH_KEYS_FQN ]]; then + ERROR "ansible public key file $AUTH_KEYS_FQN not found" + usage + exit 1 +fi + +if ! [[ -f $IMG_FQN ]]; then + ERROR "$IMG_FQN file not found" + usage + exit 1 +fi + +if [[ -n $VERBOSE ]]; then + INFO "Building ${HOSTNAME} in $BOX_NBD_OVERLAY_DIR" + set -xv +fi + +mkdir -p "$BOX_NBD_OVERLAY_DIR"/{images,xml,init,base} || exit 2 + +echo "Creating a qcow2 image file ${BOX_NBD_OVERLAY_DIR}/images/${HOSTNAME}.img that uses the cloud image file ${IMG_FQN} as its base" + +INFO qemu-img create -b "${IMG_FQN}" -f qcow2 -F qcow2 \ + "${BOX_NBD_OVERLAY_DIR}/images/${HOSTNAME}.img" "${STORAGE}G" +qemu-img create -b "${IMG_FQN}" -f qcow2 -F qcow2 \ + "${BOX_NBD_OVERLAY_DIR}/images/${HOSTNAME}.img" "${STORAGE}G" || \ + exit 3 + +echo "Creating meta-data file $BOX_NBD_OVERLAY_DIR/init/meta-data" +cat > "$BOX_NBD_OVERLAY_DIR/init/meta-data" << EOF +instance-id: ${HOSTNAME} +local-hostname: ${HOSTNAME} +EOF + +# echo "Creating meta-data file $BOX_NBD_OVERLAY_DIR/init/meta-data.json" +# cat > "$BOX_NBD_OVERLAY_DIR/init/meta-data.json" << EOF +cat > /dev/null << EOF +{ + "admin_pass": "root", + "availability_zone": "nova", + "hostname": "test.novalocal", + "launch_index": 0, + "name": "gentoo6", + "meta": { + "role": "webservers", + "essential": "false" + }, + "public_keys": { + "mykey": " ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDRCJCQ1UD9QslWDSw5Pwsvba0Wsf1pO4how5BtNaZn0xLZpTq2nqFEJshUkd/zCWF7DWyhmNphQ8c+U+wcmdNVcg2pI1kPxq0VZzBfZ7cDwhjgeLsIvTXvU+HVRtsXh4c5FlUXpRjf/x+a3vqFRvNsRd1DE+5ZqQHbOVbnsStk3PZppaByMg+AZZMx56OUk2pZCgvpCwj6LIixqwuxNKPxmJf45RyOsPUXwCwkq9UD4me5jksTPPkt3oeUWw1ZSSF8F/141moWsGxSnd5NxCbPUWGoRfYcHc865E70nN4WrZkM7RFI/s5mvQtuj8dRL67JUEwvdvEDO0EBz21FV/iOracXd2omlTUSK+wYrWGtiwQwEgr4r5bimxDKy9L8UlaJZ+ONhLTP8ecTHYkaU1C75sLX9ZYd5YtqjiNGsNF+wdW6WrXrQiWeyrGK7ZwbA7lagSxIa7yeqnKDjdkcJvQXCYGLM9AMBKWeJaOpwqZ+dOunMDLd5VZrDCU2lpCSJ1M=" + + }, + "uuid": "83679162-1378-4288-a2d4-70e13ec132aa" +} +EOF + +# password=`openssl passwd -1 -stdin <<< $password` +echo "Creating user-data file $BOX_NBD_OVERLAY_DIR/init/user-data" +# 
https://techglimpse.com/nova-boot-instance-with-password/ +cat > "$BOX_NBD_OVERLAY_DIR/init/user-data" << EOF +#cloud-config +# password: ansible +# chpasswd: { expire: False } + +ssh_pwauth: true + +runcmd: + - "rc-update add qemu-guest-agent" + - "chmod 755 /etc/init.d/qemu-guest-agent" + - "/etc/init.d/qemu-guest-agent start" + - "echo /etc/init.d/qemu-guest-agent start >> /etc/rc.local" + +users: + - default + - name: ansible + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + groups: + - wheel + - adm + shell: /bin/bash + plain_text_password: "$password" + chpasswd: { expire: False } + homedir: /home/ansible + ssh_pwauth: true + ssh_authorized_keys: + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDRCJCQ1UD9QslWDSw5Pwsvba0Wsf1pO4how5BtNaZn0xLZpTq2nqFEJshUkd/zCWF7DWyhmNphQ8c+U+wcmdNVcg2pI1kPxq0VZzBfZ7cDwhjgeLsIvTXvU+HVRtsXh4c5FlUXpRjf/x+a3vqFRvNsRd1DE+5ZqQHbOVbnsStk3PZppaByMg+AZZMx56OUk2pZCgvpCwj6LIixqwuxNKPxmJf45RyOsPUXwCwkq9UD4me5jksTPPkt3oeUWw1ZSSF8F/141moWsGxSnd5NxCbPUWGoRfYcHc865E70nN4WrZkM7RFI/s5mvQtuj8dRL67JUEwvdvEDO0EBz21FV/iOracXd2omlTUSK+wYrWGtiwQwEgr4r5bimxDKy9L8UlaJZ+ONhLTP8ecTHYkaU1C75sLX9ZYd5YtqjiNGsNF+wdW6WrXrQiWeyrGK7ZwbA7lagSxIa7yeqnKDjdkcJvQXCYGLM9AMBKWeJaOpwqZ+dOunMDLd5VZrDCU2lpCSJ1M=" +EOF + +echo "Adding keys from the public key file $AUTH_KEYS_FQN to the user-data file" +while IFS= read -r key; do + echo " - $key" >> "$BOX_NBD_OVERLAY_DIR/init/user-data" +done < <(grep -v '^ *#' < "$AUTH_KEYS_FQN") + +VM_IMAGE_DIR="$BOX_NBD_OVERLAY_DIR" +#old . /usr/local/bin/toxcore_create-ga.sh || exit 4 +cat > "$BOX_NBD_OVERLAY_DIR/init/user-data" << \EOF +#!/bin/bash + +# typically only executes on first boot + +echo "############# user_data executing ##############" + +#grep gentoo /etc/shadow + +sed -e 's/#-:ALL:ALL/+:gentoo:ALL/' -i /etc/security/access.conf +PW=`echo $PASS | openssl passwd -1 --stdin ` +grep -q ^gentoo /etc/passwd || \ + useradd --gid 4 --uid 1000 --home-dir /home/gentoo \ + --comment Gentoo --password "$PW" \ + -G adm,wheel --shell /bin/bash gentoo + +usermod --password "$PW" -G adm,wheel gentoo +# root +usermod --password '$1$1Ho4y/W8$5VymfKWWAhLxwkkPZiWTZ1' root +# unlock account +passwd -u gentoo +passwd -u root + +sed -e 's/# %wheel /%wheel /' -i /etc/sudoers +sed -e 's/PasswordAuthentication no/PasswordAuthentication yes/' -i /etc//ssh/sshd_config +sed -e 's/PermitRootLogin.*/PermitRootLogin yes/' -i /etc//ssh/sshd_config + +grep net.ipv4.ip_forward=1 /etc/sysctl.conf || \ + echo net.ipv4.ip_forward=1 >> /etc/sysctl.conf + +cd /etc/init.d +[ -e net.eth0 ] || ln -s net.lo net.eth0 + +for elt in i o linuxPen19 ; do + grep -q $elt /etc/fstab && continue + echo "$elt /mnt/$elt virtiofs defaults 0 0" >> /etc/fstab +done + +#grep gentoo /etc/shadow +EOF + +echo "Generating the cidata ISO file $BOX_NBD_OVERLAY_DIR/images/${HOSTNAME}-cidata.iso" +( + cd "$BOX_NBD_OVERLAY_DIR/init/" + genisoimage \ + -output "$BOX_NBD_OVERLAY_DIR/images/${HOSTNAME}-cidata.img" \ + -volid cidata \ + -rational-rock \ + -joliet \ + -input-charset utf-8 \ + user-data meta-data +) || exit 5 + +MACCMD= +if [[ -n $MAC ]]; then + MACCMD="--mac=${MAC}" +fi + +[ -f ${BOX_NBD_OVERLAY_DIR}/images/${HOSTNAME}.img ] || exit 5 +[ -f $BOX_NBD_OVERLAY_DIR/images/${HOSTNAME}-cidata.img ] || exit 6 + +# libvirt.libvirtError: /usr/lib/qemu/qemu-bridge-helper --use-vnet --br=-c --fd=31: failed to communicate with bridge helper: stderr=failed to parse default acl file `/etc/qemu/bridge.conf' +if [ ! -f "/etc/qemu/bridge.conf" ] ; then + echo allow $BRIDGE >> "/etc/qemu/bridge.conf" +elif ! 
grep $BRIDGE "/etc/qemu/bridge.conf" ; then + echo allow $BRIDGE >> "/etc/qemu/bridge.conf" +fi +if [ $BRIDGE = virbr0 ] ; then + network=default + # 192.168.122.248/24 +elif [ $BRIDGE = virbr1 ] ; then + network=Whonix-External +else + WARN unrecognized $BRIDGE +fi +if [ "$network" != '' ] ; then + virsh net-list | grep -q $network || \ + virsh net-start $network +else + network=default +fi +file=/etc/libvirt/qemu/networks/$network.xml +if [ ! -f $file ] ; then + WARN no network file $file +elif ! grep ' in network file' $file +fi + +declare -a LARGS +LARGS=( + --name="${HOSTNAME}" \ + --osinfo "$OSINFO" \ + --import \ + --disk "path=${BOX_NBD_OVERLAY_DIR}/images/${HOSTNAME}.img,format=qcow2" \ + --disk "path=$BOX_NBD_OVERLAY_DIR/images/${HOSTNAME}-cidata.img,device=cdrom" \ + --ram="${RAM}" \ + --vcpus="${VCPUS}" \ + --autostart \ + --hvm \ + --arch x86_64 \ + --accelerate \ + --check-cpu \ + --force \ + --watchdog=default \ + --channel type=spicevmc,target.type=virtio,target.name=com.redhat.spice.0 \ + --channel type=unix,target.type=virtio,target.name=org.qemu.guest_agent.0 \ + --rng /dev/urandom \ + --os-variant detect=on,name=$OSINFO \ + --noautoconsole \ + ) + +# not type=qemu-vdagent + +NETWORK="--network network=$network,model=virtio" +if [ -n "$NETWORK" ] ; then +LARGS+=( + $NETWORK \ +) +fi +LARGS+=( +# --graphics spice,listen=socket \ + --boot init=/sbin/init + --console pty + --video vga + --memorybacking source.type=memfd,access.mode=shared + --filesystem /,/mnt/linuxPen19 \ +) + + +INFO virt-install "${LARGS[@]}" +# squelch warnings +python3.sh `which virt-install` "${LARGS[@]}" || exit 7 +# --debug +#? --shmem name=shmem_server,type="memfd",mode="shared" +# --shmem name=shmem0 ivshmem device is not supported with this QEMU binary + +# was --graphics vnc,listen=0.0.0.0 +# --osinfo "$OSINFO" \ + +# Make a backup of the VM's XML definition file +virsh dumpxml "${HOSTNAME}" > "${BOX_NBD_OVERLAY_DIR}/xml/${HOSTNAME}.xml" || exit 8 +INFO wrote xml `ls -l ${BOX_NBD_OVERLAY_DIR}/xml/${HOSTNAME}.xml` + +if [ -n "$VERBOSE" ]; then + set +xv +fi + +# problems: type=qemu-vdagent unix unix=on +# problems: type="spicevmc +# ERROR Unknown --channel options: ['unix'] +cp "${BOX_NBD_OVERLAY_DIR}/xml/${HOSTNAME}.xml" \ + "${BOX_NBD_OVERLAY_DIR}/xml/${HOSTNAME}.xml".new + +cat > /tmp/ga.works < + + +
+ +EOF +cat > /tmp/sp.works < + +
+ +EOF + +# Show running VMs +virsh list | grep "${HOSTNAME}" && INFO "${HOSTNAME}" || { + ERROR "${HOSTNAME}" ; exit 9$? ; } + +# use the following passwordless demonstration key for testing or +# replace with your own key pair +# +# -----BEGIN OPENSSH PRIVATE KEY----- +# b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn +# NhAAAAAwEAAQAAAYEA0QiQkNVA/ULJVg0sOT8LL22tFrH9aTuIaMOQbTWmZ9MS2aU6tp6h +# RCbIVJHf8wlhew1soZjaYUPHPlPsHJnTVXINqSNZD8atFWcwX2e3A8IY4Hi7CL0171Ph1U +# bbF4eHORZVF6UY3/8fmt76hUbzbEXdQxPuWakB2zlW57ErZNz2aaWgcjIPgGWTMeejlJNq +# WQoL6QsI+iyIsasLsTSj8ZiX+OUcjrD1F8AsJKvVA+JnuY5LEzz5Ld6HlFsNWUkhfBf9eN +# ZqFrBsUp3eTcQmz1FhqEX2HB3POuRO9JzeFq2ZDO0RSP7OZr0Lbo/HUS+uyVBML3bxAztB +# Ac9tRVf4jq2nF3dqJpU1EivsGK1hrYsEMBIK+K+W4psQysvS/FJWiWfjjYS0z/HnEx2JGl +# NQu+bC1/WWHeWLao4jRrDRfsHVulq160Ilnsqxiu2cGwO5WoEsSGu8nqpyg43ZHCb0FwmB +# izPQDASlniWjqcKmfnTrpzAy3eVWawwlNpaQkidTAAAFgGKSj8diko/HAAAAB3NzaC1yc2 +# EAAAGBANEIkJDVQP1CyVYNLDk/Cy9trRax/Wk7iGjDkG01pmfTEtmlOraeoUQmyFSR3/MJ +# YXsNbKGY2mFDxz5T7ByZ01VyDakjWQ/GrRVnMF9ntwPCGOB4uwi9Ne9T4dVG2xeHhzkWVR +# elGN//H5re+oVG82xF3UMT7lmpAds5VuexK2Tc9mmloHIyD4BlkzHno5STalkKC+kLCPos +# iLGrC7E0o/GYl/jlHI6w9RfALCSr1QPiZ7mOSxM8+S3eh5RbDVlJIXwX/XjWahawbFKd3k +# 3EJs9RYahF9hwdzzrkTvSc3hatmQztEUj+zma9C26Px1EvrslQTC928QM7QQHPbUVX+I6t +# pxd3aiaVNRIr7BitYa2LBDASCvivluKbEMrL0vxSVoln442EtM/x5xMdiRpTULvmwtf1lh +# 3li2qOI0aw0X7B1bpatetCJZ7KsYrtnBsDuVqBLEhrvJ6qcoON2Rwm9BcJgYsz0AwEpZ4l +# o6nCpn5066cwMt3lVmsMJTaWkJInUwAAAAMBAAEAAAGAEuz77Hu9EEZyujLOdTnAW9afRv +# XDOZA6pS7yWEufjw5CSlMLwisR83yww09t1QWyvhRqEyYmvOBecsXgaSUtnYfftWz44apy +# /gQYvMVELGKaJAC/q7vjMpGyrxUPkyLMhckALU2KYgV+/rj/j6pBMeVlchmk3pikYrffUX +# JDY990WVO194Dm0buLRzJvfMKYF2BcfF4TvarjOXWAxSuR8www050oJ8HdKahW7Cm5S0po +# FRnNXFGMnLA62vN00vJW8V7j7vui9ukBbhjRWaJuY5rdG/UYmzAe4wvdIEnpk9xIn6JGCp +# FRYTRn7lTh5+/QlQ6FXRP8Ir1vXZFnhKzl0K8Vqh2sf4M79MsIUGAqGxg9xdhjIa5dmgp8 +# N18IEDoNEVKUbKuKe/Z5yf8Z9tmexfH1YttjmXMOojBvUHIjRS5hdI9NxnPGRLY2kjAzcm +# gV9Rv3vtdF/+zalk3fAVLeK8hXK+di/7XTvYpfJ2EZBWiNrTeagfNNGiYydsQy3zjZAAAA +# wBNRak7UrqnIHMZn7pkCTgceb1MfByaFtlNzd+Obah54HYIQj5WdZTBAITReMZNt9S5NAR +# M8sQB8UoZPaVSC3ppILIOfLhs6KYj6RrGdiYwyIhMPJ5kRWF8xGCLUX5CjwH2EOq7XhIWt +# MwEFtd/gF2Du7HUNFPsZGnzJ3e7pDKDnE7w2khZ8CIpTFgD769uBYGAtk45QYTDo5JroVM +# ZPDq08Gb/RhIgJLmIpMwyreVpLLLe8SwoMJJ+rihmnJZxO8gAAAMEA0lhiKezeTshht4xu +# rWc0NxxD84a29gSGfTphDPOrlKSEYbkSXhjqCsAZHd8S8kMr3iF6poOk3IWSvFJ6mbd3ie +# qdRTgXH9Thwk4KgpjUhNsQuYRHBbI59Mo+BxSI1B1qzmJSGdmCBL54wwzZmFKDQPQKPxiL +# n0Mlc7GooiDMjT1tbuW/O1EL5EqTRqwgWPTKhBA6r4PnGF150hZRIMooZkD2zX6b1sGojk +# QpvKkEykTwnKCzF5TXO8+wJ3qbcEo9AAAAwQD+Z0r68c2YMNpsmyj3ZKtZNPSvJNcLmyD/ +# lWoNJq3djJN4s2JbK8l5ARUdW3xSFEDI9yx/wpfsXoaqWnygP3PoFw2CM4i0EiJiyvrLFU +# r3JLfDUFRy3EJ24RsqbigmEsgQOzTl3xfzeFPfxFoOhokSvTG88PQji1AYHz5kA7p6Zfaz +# Ok11rJYIe7+e9B0lhku0AFwGyqlWQmS/MhIpnjHIk5tP4heHGSmzKQWJDbTskNWd6aq1G7 +# 6HWfDpX4HgoM8AAAALaG9sbWFuYkBhcmM= +# -----END OPENSSH PRIVATE KEY----- +# diff --git a/overlay/Linux/usr/local/bin/toxcore_delete-vm.bash b/overlay/Linux/usr/local/bin/toxcore_delete-vm.bash new file mode 100755 index 0000000..f693250 --- /dev/null +++ b/overlay/Linux/usr/local/bin/toxcore_delete-vm.bash @@ -0,0 +1,61 @@ +#!/bin/bash +# -*- mode: sh; fill-column: 75; tab-width: 8; coding: utf-8-unix -*- +# from https://github.com/earlruby/create-vm/ + +[ -f /usr/local/bin/usr_local_tput.bash ] && \ + . 
/usr/local/bin/usr_local_tput.bash || { + DBUG() { echo DEBUG $* ; } + INFO() { echo INFO $* ; } + WARN() { echo WARN $* ; } + ERROR() { echo ERROR $* ; } + } + +prog=`basename $0 .bash` +PREFIX=/usr/local +ROLE=toxcore + +# delete-vm - Delete a virtual machine created with create-vm + +# Copyright 2018-2023 Earl C. Ruby III +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +VM=$1 + +# Set VM_IMAGE_DIR environment variable to override default storage location for VMs +VM_IMAGE_DIR=${VM_IMAGE_DIR:-"${HOME}/vms/virsh"} + +VM_IMAGE="${VM_IMAGE_DIR}/images/$VM.img" +CI_IMAGE="${VM_IMAGE_DIR}/images/$VM-cidata.img" + +usage() +{ +cat << EOF +usage: $0 vmname +EOF +} + +if [[ -z $VM ]]; then + usage + exit 1 +fi + +if [[ -e $VM_IMAGE ]]; then + # VM exists + virsh destroy "$VM" + virsh undefine "$VM" + rm -fv "$VM_IMAGE" "$CI_IMAGE" +else + echo "Cannot find an VM image file named '$VM_IMAGE'. Attempting undefine..." + virsh undefine "$VM" +fi diff --git a/overlay/Linux/usr/local/bin/toxcore_get-vm-ip.bash b/overlay/Linux/usr/local/bin/toxcore_get-vm-ip.bash new file mode 100755 index 0000000..4b0f42a --- /dev/null +++ b/overlay/Linux/usr/local/bin/toxcore_get-vm-ip.bash @@ -0,0 +1,55 @@ +#!/bin/bash +# -*- mode: sh; fill-column: 75; tab-width: 8; coding: utf-8-unix -*- +# from https://github.com/earlruby/create-vm/ + +[ -f /usr/local/bin/usr_local_tput.bash ] && \ + . /usr/local/bin/usr_local_tput.bash || { + DBUG() { echo DEBUG $* ; } + INFO() { echo INFO $* ; } + WARN() { echo WARN $* ; } + ERROR() { echo ERROR $* ; } + } + +prog=`basename $0 .bash` +PREFIX=/usr/local +ROLE=toxcore + +. /usr/local/etc/testforge/testforge.bash +[ -n "$HOSTVMS_VAR_LOCAL" ] && PREFIX=$HOSTVMS_VAR_LOCAL + +# get-node-ip - Get the IP address of a VM managed by virsh. + +# Copyright 2018-2023 Earl C. Ruby III +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +usage() +{ +cat << EOF +usage: $0 hostname + +This script will take a virsh-managed VM hostname and return the IP address. 
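+
+Example (hostname is illustrative):
+  $0 gentoo6        # prints that VM's IPv4 address as learned from the ARP table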
+EOF +} + +HOSTNAME=$1 + +if [[ -z $HOSTNAME ]]; then + echo "ERROR: Hostname is required" + usage + exit 1 +fi + +MAC=$(virsh domiflist $HOSTNAME | awk '{ print $5 }' | tail -2 | head -1) +arp -a | grep $MAC | awk '{ print $2 }' | sed 's/[()]//g' +INFO MAC=$MAC arp=$arp diff --git a/overlay/Linux/usr/local/bin/usr_loc b/overlay/Linux/usr/local/bin/usr_loc new file mode 100755 index 0000000..a6abc2f --- /dev/null +++ b/overlay/Linux/usr/local/bin/usr_loc @@ -0,0 +1,70 @@ +#!/bin/sh +# -*- mode: sh; fill-column: 75; tab-width: 8; coding: utf-8-unix -*- + +[ -z "$prog" ] && prog=`basename $0 .bash` +[ -z "$USER" ] && USER=$( id -un ) +[ -z "$DEBUG" ] && DEBUG=0 + +if [ -n "$TERM" ] ; then + # vars that can be used to change font color + blue=$(tput setaf 6) + cyan=$(tput setaf 5) + green=$(tput setaf 2) + yellow=$(tput setaf 3) + red=$(tput setaf 1) + normal=$(tput sgr0) # default color +else + blue= + cyan= + green= + yellow= + red= + normal= +fi + +FTAL () { + echo ${red}FATL:${normal} $* + exit $1 +} +ftal () { FTAL >&2 "$@" ; } +panic () { FTAL >&2 "$@" ; } + +ERROR () { + echo ${red}EROR:${normal} $* + return 0 +} +error () { ERROR >&2 $* ; } + +WARN () { + echo ${yellow}WARN:${normal} $* + return 0 + +} +warn () { WARN >&2 $* ; } + +USAGE () { + echo ${yellow}USAGE:${normal} $* + return 0 + +} +usage () { USAGE >&2 $* ; } + +INFO () { + echo ${green}INFO:${normal} $* + return 0 +} +info () { INFO >&2 $* ; } + +DBUG () { + [ -z "$DEBUG" -o "$DEBUG" -eq 0 ] || echo ${blue}DBUG:${normal} $* + return 0 +} +dbug () { DBUG >&2 $* ; } +debug () { [ "$DEBUG" = "1" ] && echo >&2 ${cyan}DBUG:${normal} $* ; return 0 ; } + +usage () { + echo ${yellow}USAGE:${normal} $* + return 0 + +} +USAGE () { usage $* ; } diff --git a/overlay/Linux/usr/local/bin/usr_local_base.bash b/overlay/Linux/usr/local/bin/usr_local_base.bash new file mode 100755 index 0000000..24b6d53 --- /dev/null +++ b/overlay/Linux/usr/local/bin/usr_local_base.bash @@ -0,0 +1,36 @@ +#!/bin/bash +# -*- mode: sh; fill-column: 75; tab-width: 8; coding: utf-8-unix -*- + +[ -z "$prog" ] && prog=`basename $0 .bash` +[ -z "$USER" ] && USER=$( id -un ) +[ -f /usr/local/bin/usr_local_tput.bash ] && \ + . /usr/local/bin/usr_local_tput.bash + +## box_gentoo_emerge +box_gentoo_emerge () { + [ "$#" -lt 1 ] && return 0 + local elt + declare -a ARGS + for elt in "$@" ; do + [ -z "$elt" ] && continue + grep -q "^$elt$" /var/lib/portage/world && continue + ls /var/db/pkg/"$elt"-[0-9]* 2>/dev/null >/dev/null && continue + qlist -IsS "$elt" | grep -q "^$elt" && continue + equery l -f "^$elt$" | grep '^.I' && continue + ARGS+=($elt) + done + [ "${#ARGS[@]}" -eq 0 ] && exit 0 + INFO "${ARGS[@]}" + /usr/local/sbin/box_gentoo_emerge.bash "${ARGS[@]}" || return $? + return 0 +} + +base=usr_local_base +# DBUG 0=$0 +if [ -x /usr/bin/basename ] && [ $( /usr/bin/basename -- $0 ) = $base'.bash' -o $( basename -- $0 ) = $base'.sh' ] ; then + [ "$#" -eq 0 ] && exit 0 + [ "$#" -eq 1 ] && [ "$1" = '-h' -o "$1" = '--help' ] && \ + echo USAGE: $0 && grep '^[a-z].*()\|^## ' $0 | sed -e 's/().*//'|sort && exit 0 + eval "$@" + exit $? 
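+ # Illustrative direct call handled by the eval dispatch above (package atom is a
+ # placeholder): /usr/local/bin/usr_local_base.bash box_gentoo_emerge app-misc/foo
+ # Invoking with -h/--help lists the callable helper functions defined in this file.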
+fi diff --git a/overlay/Linux/usr/local/bin/usr_local_tput.bash b/overlay/Linux/usr/local/bin/usr_local_tput.bash new file mode 100755 index 0000000..db90d0f --- /dev/null +++ b/overlay/Linux/usr/local/bin/usr_local_tput.bash @@ -0,0 +1,76 @@ +#!/bin/sh +# -*- mode: sh; fill-column: 75; tab-width: 8; coding: utf-8-unix -*- + +[ -z "$TERM" ] && exit 0 +[ -z "$prog" ] && prog=`basename $0 .bash` +[ -z "$USER" ] && USER=$( id -un ) +[ -z "$DEBUG" ] && DEBUG=0 + +if [ -n "$TERM" ] ; then + # vars that can be used to change font color + blue=$(tput setaf 6) + cyan=$(tput setaf 5) + green=$(tput setaf 2) + yellow=$(tput setaf 3) + red=$(tput setaf 1) + normal=$(tput sgr0) # default color +else + blue= + cyan= + green= + yellow= + red= + normal= +fi + +FATL () { + [ $# -eq 1 ] && code=1 + [ $# -gt 1 ] && code=$1 && shift + echo ${red}FATL:${normal} $* + exit 1 +} +ftal () { FATL >&2 "$@" ; } +panic () { FATL >&2 "$@" ; } +PANIC () { FATL >&2 "$@" ; } + +ERROR () { + echo ${red}EROR:${normal} $* + return 0 +} +error () { ERROR >&2 $* ; } + +WARN () { + echo ${yellow}WARN:${normal} $* + return 0 + +} +warn () { WARN >&2 $* ; } + +USAGE () { + echo ${yellow}USAGE:${normal} $* + return 0 + +} +usage () { USAGE >&2 $* ; } + +INFO () { + echo ${green}INFO:${normal} $* + return 0 +} +info () { INFO >&2 $* ; } + +DBUG () { + [ -z "$DEBUG" ] || [ "$DEBUG" = 0 ] || echo ${blue}DBUG:${normal} $* + return 0 +} +dbug () { DBUG >&2 $* ; } +debug () { [ "$DEBUG" = "1" ] && echo >&2 ${cyan}DBUG:${normal} $* ; return 0 ; } + +usage () { + echo ${yellow}USAGE:${normal} $* + return 0 + +} +USAGE () { usage $* ; } + +ols_are_we_connected () { route | grep -q ^default ; return $? ; } diff --git a/overlay/Linux/usr/local/bin/yaml_to_bash.bash b/overlay/Linux/usr/local/bin/yaml_to_bash.bash new file mode 100755 index 0000000..024a3af --- /dev/null +++ b/overlay/Linux/usr/local/bin/yaml_to_bash.bash @@ -0,0 +1,35 @@ +#!/bin/sh +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- +# pkuczynski/parse_yaml.sh + +prog=$( basename $0 .bash ) +ROLE=base + +# FixMe: lists should be space delineated not comma + +parse_yaml() { + local prefix + local depth + local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034') + + [ "$#" -eq 2 ] && prefix=$2 || prefix="" + [ "$#" -gt 2 ] && depth=$3 || depth="" + + sed -ne "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \ + -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $1 | + awk -F$fs '{ + indent = length($1)/2; + vname[indent] = $2; + for (i in vname) { + if (i > indent) {delete vname[i]}} + if (length($3) > 0) { + vn=""; + for (i=0; i indent) {delete vname[i]}} + if (length($3) > 0) { + vn=""; for (i=0; i indent) {delete vname[i]}} + if (length($3) > 0) { + vn=""; for (i=0; i {"changed": false, "cmd": "/bin/lsblk --list --noheadings --paths --output NAME,UUID --exclude 2", "msg": "Timer expired after 30 seconds", "rc": 257} +gather_timeout = 120 +handler_includes_static = True +# Ansible by default will override variables in specific precedence orders, as described in Variables. +# When a variable of higher precedence wins, it will replace the other value. +#?! hash_behaviour = merged + +local_tmp = /var/tmp +# library = /usr/share/ansible +library = /g/TestForge/src/ansible/library +nocows = 0 +retry_files_enabled = False +roles_path = /g/TestForge/src/ansible/roles +# exists? 
+plugins_path = /g/TestForge/src/ansible/library/plugins + diff --git a/overlay/Linux/usr/local/etc/ansible/ansible.cfg/16optimize.cfg b/overlay/Linux/usr/local/etc/ansible/ansible.cfg/16optimize.cfg new file mode 100644 index 0000000..475cb58 --- /dev/null +++ b/overlay/Linux/usr/local/etc/ansible/ansible.cfg/16optimize.cfg @@ -0,0 +1,14 @@ +host_key_checking = False +# http://chrisbergeron.com/2018/06/08/ansible_performance_tuning/ +gathering = smart +fact_caching = jsonfile +fact_caching_connection = var/tmp/.ansible_fact_cache + +forks = 5 +timeout = 90 +# This sets the interval (in seconds) of Ansible internal processes polling each other. Lower values +# improve performance with large playbooks at the expense of extra CPU load. Higher values are more +# suitable for Ansible usage in automation scenarios, when UI responsiveness is not required but CPU usage +# might be a concern. Default corresponds to the value hardcoded in 2.1: was 1 +internal_poll_interval=5 + diff --git a/overlay/Linux/usr/local/etc/ansible/ansible.cfg/28version.cfg b/overlay/Linux/usr/local/etc/ansible/ansible.cfg/28version.cfg new file mode 100644 index 0000000..b8f4ceb --- /dev/null +++ b/overlay/Linux/usr/local/etc/ansible/ansible.cfg/28version.cfg @@ -0,0 +1,5 @@ +# new 2.8 +# https://docs.ansible.com/ansible/latest/porting_guides/porting_guide_2.8.html +string_conversion_action = error +conditional_bare_variables = false + diff --git a/overlay/Linux/usr/local/etc/ansible/ansible.cfg/40ssh_connection.cfg b/overlay/Linux/usr/local/etc/ansible/ansible.cfg/40ssh_connection.cfg new file mode 100644 index 0000000..457d29b --- /dev/null +++ b/overlay/Linux/usr/local/etc/ansible/ansible.cfg/40ssh_connection.cfg @@ -0,0 +1,3 @@ +# https://thepracticalsysadmin.com/turbocharge-your-ansible-playbooks/ +[ssh_connection] +pipelining = True diff --git a/overlay/Linux/usr/local/etc/ansible/ansible.cfg/50inventory.cfg b/overlay/Linux/usr/local/etc/ansible/ansible.cfg/50inventory.cfg new file mode 100644 index 0000000..fdf3266 --- /dev/null +++ b/overlay/Linux/usr/local/etc/ansible/ansible.cfg/50inventory.cfg @@ -0,0 +1,3 @@ +[inventory] +enable_plugins = yaml + diff --git a/overlay/Linux/usr/local/etc/ansible/ansible.cfg/60chroot_connection.cfg b/overlay/Linux/usr/local/etc/ansible/ansible.cfg/60chroot_connection.cfg new file mode 100644 index 0000000..3576c77 --- /dev/null +++ b/overlay/Linux/usr/local/etc/ansible/ansible.cfg/60chroot_connection.cfg @@ -0,0 +1,3 @@ +[chroot_connection] +# exe = /usr/local/sbin/base_chroot.bash +exe = /bin/chroot diff --git a/overlay/Linux/usr/local/etc/local.d/local.bash b/overlay/Linux/usr/local/etc/local.d/local.bash new file mode 100755 index 0000000..e5c9f41 --- /dev/null +++ b/overlay/Linux/usr/local/etc/local.d/local.bash @@ -0,0 +1,519 @@ +#!/bin/bash +# -*-mode: sh; tab-width: 8; coding: utf-8-unix -*- + +# prog=rc.local +PREFIX=/usr/local +ROLE=local + +export PATH=$PATH:/usr/local/sbin:/usr/local/bin + +if [ -x /sbin/rc-service ] ; then + local_rc_service () { rc-service "$@" ; } + local_rc_update () { rc-update "$@" ; } + elif [ -x /bin/systemctl ] ; then + local_rc_service () { + systemctl list-units --no-pager | grep -q $1 && \ + echo INFO: /usr/sbin/service "$@" || \ + echo WARN: /usr/sbin/service "$@" + /usr/sbin/service "$@" 2>/dev/null + return $? + } + local_rc_update () { + if [ "$#" -eq 0 ] ; then + systemctl list-units --no-pager + return $? 
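+ # For example (service name illustrative): "local_rc_update add sshd" becomes
+ # "systemctl --no-pager enable sshd"; "del" maps to "disable" via the branch below.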
+ fi + dir=$1 ; shift ; + svc=$1 ; shift ; + if [ $dir = add ] ; then + dire=enable + elif [ $dir = del ] ; then + dire=disable + else + dire=$dir + fi + echo INFO: $prog systemctl --no-pager $dire $svc "$@" + systemctl --no-pager $dire $svc "$@" + return $? + } + elif [ -x /usr/sbin/service ] ; then + local_rc_service () { + /usr/sbin/service --status-all 2>&1 | grep -q $1 && \ + echo INFO: /usr/sbin/service "$@" || \ + echo WARN: /usr/sbin/service "$@" + /usr/sbin/service "$@" 2>/dev/null + return $? + } + local_rc_update () { + if [ "$#" -eq 0 ] ; then + /usr/sbin/service --status-all 2>&1 | sed -e 's/.* //' + return $? + fi + dir=$1 ; shift ; + svc=$1 ; shift ; + # disable|enable |remove + if [ $dir = add ] ; then + dire=enable + elif [ $dir = del ] ; then + dire=disable + else + dire=$dir + fi + update-rc.d $svc $dire || echo WARN: update-rc.d $svc $dir $dire + } +fi +proxy_rc_service () { local_rc_service $* ; } +proxy_rc_update () { local_rc_update $* ; } + +grep -q root=/dev/vda /proc/cmdline +PROXY_IS_VDA=$? + +## local_disable_lid +local_disable_lid () { + # https://bbs.archlinux.org/viewtopic.php?id=72779 + echo LID0 > /proc/acpi/wakeup + # https://forums.linuxmint.com/viewtopic.php?f=208&t=106532 + if [ -f /etc/UPower/UPower.conf ] ; then + [ -f /etc/UPower/UPower.conf.bak ] || \ + cp -p /etc/UPower/UPower.conf /etc/UPower/UPower.conf.bak + grep -q '^IgnoreLid=true' /etc/UPower/UPower.conf || \ + sed -e 's@#*IgnoreLid=.*@IgnoreLid=true@' -i /etc/UPower/UPower.conf + fi + if [ -f /etc/systemd/logind.conf ] ; then + [ -f /etc/systemd/logind.conf.bak ] || \ + cp -p /etc/systemd/logind.conf /etc/systemd/logind.conf.bak + grep -q '^HandleLidSwitch=ignore' /etc/systemd/logind.conf || \ + sed -e 's@^#*HandleLidSwitch=.*@HandleLidSwitch=ignore@' -i /etc/systemd/logind.conf + fi + return 0 +} + +## local_guest_neutersystemd +local_config_neutersystemd () { + [ ! -f /lib/lsb/init-functions.d/40-systemd ] || \ + mv /lib/lsb/init-functions.d/40-systemd /lib/lsb/.40-systemd + return 0 +} + +## local_guest_fstab_config +local_guest_fstab_config () { + + [ -d /mnt/mnt ] || mkdir /mnt/mnt + grep -q 9p /etc/fstab || { + echo mnt /mnt/mnt 9p trans=virtio,version=9p2000.L,posixacl,msize=10485760,cache=mmap \ + >> /etc/fstab + } + + # failsafe + grep 9p /etc/modules-load.d/*conf 2>/dev/null >/dev/null || \ + cat > /etc/modules-load.d/9p.conf << EOF +9p +9pnet +9pnet_virtio +EOF + lsmod | grep -q 9pnet_virtio || modprobe -a `cat /etc/modules-load.d/*.conf` + + return 0 +} + +## local_guest_config +local_guest_bootstrap () { + local_guest_fstab_config + return 0 +} + +## local_guest_config +local_guest_config () { + local_guest_bootstrap + local_guest_config_neutersystemd + + [ -d /etc/qemu ] || mkdir /etc/qemu + [ -f /etc/qemu/qemu-ga.conf ] || cat > /etc/qemu/qemu-ga.conf < /etc/default/qemu-guest-agent.diff < /etc/modules-load.d/$base + grep -v '^#' $file >> /etc/modules-load.d/$base + done + + # old +if [ -d /usr/local/etc/modules-load.d/ ] ; then + ls /etc/modules-load.d/vda*conf 2>/dev/null >/dev/null || \ + ln -s /usr/local/etc/modules-load.d/vda*conf /etc/modules-load.d + fi + + + return 0 +} + +# all +## local_lightdm_on_text +local_lightdm_on_text () { + return 0 + [ ! -f /usr/sbin/lightdm ] && return 0 + if [ ! -f /usr/sbin/lightdm.bin ] ; then + [ -f /usr/sbin/lightdm.bad ] && mv /usr/sbin/lightdm.bad /usr/sbin/lightdm.bin + [ ! 
-f /usr/sbin/lightdm.bin ] && mv /usr/sbin/lightdm /usr/sbin/lightdm.bin + if [ -f /usr/sbin/lightdm.bin ] && [ -f /usr/sbin/lightdm ] ; then + cat > /usr/sbin/lightdm << EOF +#!/bin/sh +grep -q ' text ' /proc/cmdline && exit 0 +exec /usr/sbin/lightdm.bin "$@" +EOF + chmod 755 /usr/sbin/lightdm + fi + fi + + return 0 +} + +## local_guest_start_services +local_guest_start_services () { local_guest_start ; } +## local_guest_start +local_guest_start () { + + local_guest_modules_load + + lsmod | grep -q 9pnet_virtio || \ + grep -hv '#' /etc/modules-load.d/vda*.conf | xargs modprobe --all + +# local_start_and_add_services $* + + exit 0 +} + +## local_guest_add_xorg_conf +local_guest_add_xorg_conf () { + [ -f /etc/X11/xorg.conf.d/80_qxl.conf ] || \ + grep -q 'Drive.*qxl' /etc/X11/xorg.conf.d/*.conf || \ + cat > /etc/X11/xorg.conf.d/80_qxl.conf << EOF +# BEGIN ANSIBLE MANAGED BLOCK proxy whonix_guest.yml +Section "Device" + Identifier "qxl" + Driver "qxl" + Option "DPI" "96 x 96" + Option "ENABLE_IMAGE_CACHE" "True" + Option "ENABLE_FALLBACK_CACHE" "False" + Option "ENABLE_SURFACES" "False" +EndSection +# END ANSIBLE MANAGED BLOCK proxy whonix_guest.yml +EOF + return 0 +} + + +## local_guest_status +local_guest_status () { + if [ ! -f /var/log/libvirtd/qemu-ga.log ] ; then + echo WARN: missing /var/log/libvirtd/qemu-ga.log + elif grep -q critical: /var/log/libvirtd/qemu-ga.log ; then + echo ERROR: critical /var/log/libvirtd/qemu-ga.log + fi + return 0 +} + +# vda +## local_host_symlink_usr_src +local_host_symlink_etc_fstab () { + # guest + [ -h /etc/fstab ] && [ -f /etc/fstab.vda ] && \ + rm -f /etc/fstab && ln -s /etc/fstab.vda /etc/fstab + + return 0 +} + +## local_host_make_dmcrypt_swap +local_host_make_dmcrypt_swap () { + local two five + + if ! grep -q '/dev/mapper\|/dev/sd\|/dev/dm' /proc/swaps ; then + blkid > ~/blkid.txt + five=`grep _05E ~/blkid.txt | head -1` + if [ $? -eq 0 -a -n "$five" ] ; then + two=`echo $five | sed -e 's/_.*//' -e 's/.*="//'`_02SWAP + if ! grep $two ~/blkid.txt ; then + dev=`echo $five | sed -e 's/:.*//' -e 's/5/2/'` + [ $? 
-eq 0 -a -n "$dev" ] && mkswap -L $two $dev + fi + grep $two /etc/conf.d/dmcrypt && local_rc_service dmcrypt restart || echo WARN: $two not in /etc/conf.d/dmcrypt + grep -q '/dev/mapper\|/dev/sd\|/dev/dm' /proc/swaps || local_rc_service swap restart + # if its not in fstab + grep -q '/dev/mapper\|/dev/sd\|/dev/dm' /proc/swaps || swapon /dev/mapper/cryptswap* + fi + fi + + return 0 +} + +# all +local_start_services () { local_start_and_add_services ; } +## local_start_and_add_services +local_start_and_add_services () { + + for elt in $*; do + local_rc_service $elt status >/dev/null || local_rc_service $elt start + local_rc_update | grep -q $elt || local_rc_update add $elt + done + + return 0 +} + + +# all +## local_manual_stop_services +local_manual_stop_services () { + # set these to stop now and restart them manually as we configure them + # rsync on debian + for elt in $* ; do + local_rc_service $elt status >/dev/null && local_rc_service $elt stop + local_rc_update | grep -q $elt && local_rc_update del $elt + done + + return 0 +} + + +# host +## local_host_symlink_usr_src +local_host_symlink_usr_src () { + local dir + # broken + dir=`cat /proc/cmdline|sed -e 's/.*BOOT_IMAGE=kernel-pentoo-x86_64/linux/' -e 's/_.*//'` + WD=$PWD + cd /usr/src + if [ -d $dir ] ; then + rm -f linux + ln -s $dir linux || echo WARN: $PWD/$dir not found + fi + cd $WD + + return 0 +} + +# host +## local_host_restart_psmouse +local_host_restart_psmouse () { + local_rc_service gpm status && local_rc_service gpm stop + rmmod psmouse; sleep 1; modprobe psmouse proto=exps + local_rc_service gpm start + return 0 +} + +# host +## local_host_restart_intel_sound +local_host_restart_intel_sound () { + which aplay >/dev/null 2>/dev/null || return 0 + + # both + if ! aplay -L | grep -q default:CARD=PCH ; then + rmmod snd_hda_intel ; + sleep 5 + modprobe snd_hda_intel enable=1 ; + sleep 1 + aplay -L >/dev/null || exit 2 + fi + + return 0 +} + +## local_kicksecure +local_kicksecure () { + local_guest || exit 1$? + return 0 +} + +## local_gateway +local_gateway () { + local_guest || exit 1$? + return 0 +} + +ELTS="sdwdate rads" + +## local_guest +local_guest () { + # grep -q text /proc/cmdline && local_lightdm_on_text + local_all + local_guest_config + local_guest_start + + if [ -d /etc/apt ] ; then + # bootstrap for ansible + if ! apt-cache search openssh-server | grep -q Size ; then + apt-get install openssh-server + fi + local_start_and_add_services ssh + if grep -q text /proc/cmdline ; then + local_manual_stop_services graphical lightdm rads + fi + fi + # local_manual_mask_services $ELTS + return 0 +} + +## local_host +local_host () { + local_host_crit_boot || exit 1$? + local_disable_lid + local_host_restart_intel_sound + local_host_restart_psmouse + local_all + return 0 +} + +# local_null_machineid +local_null_machineid () { + [ -s /etc/machine-id ] && cp /dev/null /etc/machine-id + return 0 +} + +## local_all +local_all () { + local_host_crit_boot || exit 2 + uuidgen > /etc/machine-id + + local_config_neutersystemd + local_null_machineid + + touch /var/log/boot + + chmod 775 /usr/local/*bin/*sh + local_neuter_gvfs + + ( cd /var/tmp && rm -rf ansible-local-* Temp-* ssh-* pulse-* .xfsm-ICE-* ) + + local_systemd_stop_and_mask + return 0 +} + +## local_manual_mask_services +local_manual_mask_services () { + if [ -d /usr/local/etc/systemd/ ] ; then + local_systemd_stop_and_mask $* || return 1$? + elif [ -x /usr/sbin/update-rc.d ] ; then + /usr/sbin/invoke-rc.d $1 stop + /usr/sbin/update-rc.d $1 remove || return 2$? 
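+ # Note: this branch and the openrc branch below only act on the first
+ # argument ($1), unlike the systemd branch above which masks every service given.
+ # The test "[ /sbin/rc-update ]" below is a bare string test and is always
+ # true; "[ -x /sbin/rc-update ]" is presumably what is intended.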
+ elif [ /sbin/rc-update ] ; then + /sbin/rc-service $1 stop + /sbin/rc-update $1 del || return 3$? + fi + + return 0 +} + +# local_guest_neutersystemd +local_guest_neutersystemd () { + local_systemd_stop_and_mask + return $? +} + +## local_systemd_stop_and_mask +local_systemd_stop_and_mask () { + [ -d /lib/systemd/system/ ] || return 0 + [ $# -eq 0 ] && [ -d /usr/local/etc/systemd/ ] && \ + set - `grep -l -v '#\|@\.service' /usr/local/etc/systemd/*.mask` + for file in $* ; do + [ -e /lib/systemd/system/$file ] || continue + elt=`basename $file` + systemctl is-enabled $elt 2>/dev/null >/dev/null || continue + echo INFO: local_systemd_stop_and_mask systemctl disable $elt + systemctl disable --now $elt && systemctl mask $elt + # [ -h /etc/systemd/system/$file ] + # [ `readlink /etc/systemd/system/$file ` = /dev/null ] + done + + return 0 +} + +## local_neuter_gvfs +local_neuter_gvfs () { + [ -d /usr/local/share/dbus-1/services ] || exit 0 + cd /usr/local/share/dbus-1/services + for file in /usr/share/dbus-1/services/*vfs* ; do + sed -e 's@^Exec=.*@Exec=/bin/false@' > `basename $file` + done +} + +# local_link_linux +local_link_linux () { + + sed < /proc/cmdline -e 's@.*BOOT_IMAGE=vmlinuz-@linux-@' -e 's/[_ ].*//'| \ + while read line ; do + [ -z "$line" ] && continue + [ -d "/usr/src/$line" ] || { echo WARN: /usr/src/$line ; continue ; } + rm -f /usr/src/linux + echo INFO: /usr/src/$line /usr/src/linux + ln -s /usr/src/$line /usr/src/linux + done + + return 0 +} + +# local_host_crit_boot +local_host_crit_boot () { + [ -d /mnt/l/syslinux ] || return 0 + local a=`grep BOOT_IMAGE /proc/cmdline |sed -e 's/.*BOOT_IMAGE=//' -e 's/ .*//'` + [ -n "$a" ] || return 1 + [ -f "/boot/$a" ] || return 2 + [ -f "/mnt/l/syslinux/$a" ] || return 3 + + diff "/boot/$a" "/mnt/l/syslinux/$a" || { + /usr/local/bin/base_wall.bash $prog 'CRIT: ' "/boot/$a" "/mnt/l/syslinux/$a" + return 4 + } + + a=`grep initrd= /proc/cmdline |sed -e 's/.*initrd=//' -e 's/ .*//' -e 's/.*,//'` + [ -n "$a" ] || return 11 + [ -f "/boot/$a" ] || return 12 + [ -f "/mnt/l/syslinux/$a" ] || return 13 + diff "/boot/$a" "/mnt/l/syslinux/$a" || { + /usr/local/bin/base_wall.bash $prog 'CRIT: ' "/boot/$a" "/mnt/l/syslinux/$a" + return 14 + } + + return 0 +} + +base=local +if [ -x /usr/bin/basename ] && [ `/usr/bin/basename -- $0` = $base'.bash' ] ; then + [ "$#" -eq 1 ] && [ "$1" = '-h' -o "$1" = '--help' ] && \ + echo USAGE: $0 && grep '^[a-z].*()\|^## ' $0 | sed -e 's/().*//'| sort \ + && exit 0 + "$@" + exit $? + fi + diff --git a/overlay/Linux/usr/local/etc/systemd/base.mask b/overlay/Linux/usr/local/etc/systemd/base.mask new file mode 100644 index 0000000..64eb41b --- /dev/null +++ b/overlay/Linux/usr/local/etc/systemd/base.mask @@ -0,0 +1,5 @@ +debug-shell.service +multi-user.target.wants/swap-file-creator.service +swap-file-creator.service +systemd-backlight@.service +systemd-backlight@backlight.service diff --git a/overlay/Linux/usr/local/sbin/base_chroot.bash b/overlay/Linux/usr/local/sbin/base_chroot.bash new file mode 100755 index 0000000..8d4f78e --- /dev/null +++ b/overlay/Linux/usr/local/sbin/base_chroot.bash @@ -0,0 +1,159 @@ +#!/bin/sh +# -*- mode: sh; tab-width: 8; encoding: utf-8-unix -*- + +prog=$( basename $0 .bash ) +ROLE=base +# export PATH=$PATH:/usr/local/bin +. 
/usr/local/bin/usr_local_tput.bash +# MUST be silent +error () { retval=$1 ; shift; ERROR $prog $* >&2 ; exit $retval ; } +usage () { echo "USAGE: $prog chroot-dir [command args] -" $* >&2 ; exit 1 ; } +warn () { : ; } +info () { : ; } +debug () { : ; } + +# must be run as root +[ "$( id -u )" -ne "0" ] && error 1 "must be run as root" + +[ -x /bin/chroot ] && EXE=/bin/chroot +[ -x /usr/sbin/chroot ] && EXE=/usr/sbin/chroot # debian + +setcap CAP_SYS_PTRACE=+ep $EXE + +if [ "$#" -eq "0" ] ; then + usage "give an absolute directory name as argument" +fi + +LARGS="" +CMD="" +while true; do + case "$1" in + '-'*) + LARGS="$1" + shift + ;; + *) + break + ;; + esac +done +[ -z "$LARGS" ] && LARGS="--userspec=0:0" +root=$1 +shift + +if [ ! -d "$root" ] ; then + error 1 "directory not found - $root" + fi + +# unix partition +[ -d $root/lost+found ] || WARN "No $root/lost+found" +# linux partition +[ -e $root/usr/src/ ] || WARN "No $root/usr/src" + +# check for /dev/loop devices - up to 255 on android +[ -e /dev/loop1 ] || \ + ( cd /dev && \ + for i in 0 1 2 3 4 5 6 7 ; do + [ -e loop$i ] && continue + mknod loop$i b 7 $i + chmod 660 loop$i + chgrp disk loop$i + done ) + +cd $root || error 6 "Can't cd to $root" + +# sbin/boostrap_chroot.bash +for file in .bashrc .bash_profile .bash_logout .emacs ; do + [ -f $root/root/$file ] && continue + cp -p /root/$file $root/root/ +done + +for file in tmp usr/tmp var/tmp ; do + [ -d $file ] && continue + mkdir $file || error 8 " missing directory $file" + chmod 1777 $file + done +# df /var/tmp | grep -q sd.12 || mount /var/tmp + +for file in proc sys dev dev/pts dev/shm usr ; do + [ -d $file ] && continue + mkdir $file || error 9 "Cant mkdir $file" + chmod 755 $file + done + +if false ; then + [ -e proc/self ] || mount -o bind /proc $root/proc || error 10 + # https://forums.gentoo.org/viewtopic-t-1061422-start-0.html + [ -e dev/null ] || mount -o bind /dev $root/dev || error 11 + # what happens to dev/shm ? its own memory? + # required for ansible and firefox + df -a | grep -q $root/dev/shm || mount -t tmpfs -o noexec,size=5% tmpfs $root/dev/shm || error 12 + [ -e dev/pts/ptmx ] || \ + mount -t devpts -o rw,relatime,gid=5,mode=620,ptmxmode=000 devpts $root/dev/pts || error 13 + else + # https://wiki.gentoo.org/wiki/Chroot + [ -e dev/loop0 ] || \ + { mount --rbind /dev $root/dev ; mount --make-rslave $root/dev ; } \ + || error 10 mount --rbind /dev $root/dev + [ -e proc/self ] || mount -t proc /proc $root/proc \ + || error 11 mount -t proc /proc + [ -e sys/block ] || \ + { mount --rbind /sys $root/sys ; mount --make-rslave $root/sys ; } \ + || error 12 --rbind /sys $root/sys + df -a | grep -q $root/dev/shm || \ + mount -t tmpfs -o noexec,size=5% tmpfs $root/dev/shm || error 14 $root/dev/shm + df -a | grep -q $root/tmp || mount --rbind /tmp $root/tmp \ + || error 13 mount --rbind /tmp $root/tmp + # https://wiki.gentoo.org/wiki/Project:X86/Chroot_Guide + [ -e dev/pts/ptmx ] || \ + mount -o bind /dev/pts $root/dev/pts || error 14 mount -o bind /dev/pts $root/dev/pts + fi + +# user +if [ -d $root/$HOME -a -f ~/.Xauthority ] ; then + cp ~/.Xauthority $root/$HOME + cp ~/.xauth* $root/$HOME + fi + +base=$( basename $root ) +[ -e ./start.rc ] || cat > ./start.rc << EOF +# env-update && . 
/etc/profile +export PS1='\${tty}\\u@${base}:\\W\\$ ' +EOF + +[ -z "$DISPLAY" ] || grep -q DISPLAY ./start.rc || \ + echo export DISPLAY=\"$DISPLAY\" >> ./start.rc + +# openpty failed: 'out of pty devices' +# root@Flati:11# d /dev/pts/ +# total 6 +# 2 ./ 4 ../ + +# You'll also want to copy over resolv.conf in order to have proper DNS name +# resolution from inside the chroot: +cp -L /etc/resolv.conf etc || error 16 "Cant cp -L /etc/resolv.conf" + +EARGS="CHROOT=$root PATH=/usr/sbin:/usr/bin:/sbin:/bin" +#? set these to root or derive them? what about -l? +EELTS="$EELTS TERM DISPLAY HOME LANG LC_ALL" +[ -z "$LC_COLLATE" ] && EELTS="$EELTS LC_COLLATE" || EARGS="$EARGS LC_COLLATE=C" + +. /usr/local/bin/proxy_export.bash >/dev/null + +EELTS="$EELTS http_proxy https_proxy socks_proxy no_proxy" +for elt in $EELTS ; do + EARGS="$EARGS $( env|grep ^${elt}= )" + done + +# mesg: ttyname failed: Success +tty=$( tty 2>/dev/null ) +[ $? -eq 0 -a -n "$tty" ] && EARGS="$EARGS TTY=$tty" + +# was /bin/bash -l +[ "$#" -eq 0 ] && set -- /bin/bash -i -l + +# Now you can chroot into your new system. Use env before chroot to ensure that no +# environment variables from the installation media are used by your new system: +#? PATH=$PATH +# info chroot $LARGS $root /usr/bin/env -i $EARGS "$@" +exec $EXE $LARGS $root /usr/bin/env -i $EARGS "$@" diff --git a/overlay/Linux/usr/local/sbin/base_chroot_caps.bash b/overlay/Linux/usr/local/sbin/base_chroot_caps.bash new file mode 100755 index 0000000..2c9a230 --- /dev/null +++ b/overlay/Linux/usr/local/sbin/base_chroot_caps.bash @@ -0,0 +1,155 @@ +#!/bin/sh +# -*- mode: sh; tab-width: 8; encoding: utf-8-unix -*- + +prog=$( basename $0 .bash ) +ROLE=base +. /usr/local/bin/usr_local_tput.bash + +# MUST be silent +usage () { echo "USAGE: $prog chroot-dir [command args] -" $* >&2 ; exit 1 ; } +error () { retval=$1 ; shift; ERROR "$prog" $* >&2 ; exit $retval ; } +warn () { WARN "$prog" $* >&2 } +info () { INFO "$prog" $* >&2 } +debug () { DBUG "$prog" $* >&2 } + +# must be run as root +[ "$( id -u )" -ne "0" ] && error 1 "must be run as root" + +if [ "$#" -eq "0" ] ; then + usage "give an absolute directory name as argument" +fi + +LARGS="" +CMD="" +while true; do + case "$1" in + '-'*) + LARGS="$1" + shift + ;; + *) + break + ;; + esac +done +[ -z "$LARGS" ] && LARGS="--userspec=0:0" +root=$1 +shift + +if [ ! -d "$root" ] ; then + error "directory not found - $root" + fi + +# unix partition +[ -d $root/lost+found ] || warn "No $root/lost+found" +# linux partition +[ -e $root/usr/src/ ] || warn "No $root/usr/src" + +# check for /dev/loop devices +[ -e /dev/loop1 ] || \ + ( cd /dev && \ + for i in 0 1 2 3 4 5 6 7 ; do + [ -e loop$i ] && continue + mknod loop$i b 7 $i + chmod 660 loop$i + chgrp disk loop$i + done ) + +cd $root || error 6 "Can't cd to $root" + +# sbin/boostrap_chroot.bash +for file in .bashrc .bash_profile .bash_logout .emacs ; do + [ -f $root/root/$file ] && continue + cp -p /root/$file $root/root/ +done + +for file in tmp usr/tmp var/tmp ; do + [ -d $file ] && continue + mkdir $file || error 8 " missing directory $file" + chmod 1777 $file + done + +for file in proc sys dev dev/pts dev/shm usr ; do + [ -d $file ] && continue + mkdir $file || error 9 "Cant mkdir $file" + chmod 755 $file + done + +if false ; then + [ -e proc/self ] || mount -o bind /proc $root/proc || error 10 + # https://forums.gentoo.org/viewtopic-t-1061422-start-0.html + [ -e dev/null ] || mount -o bind /dev $root/dev || error 11 + # what happens to dev/shm ? its own memory? 
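+ # A plain (non-recursive) bind mount of /dev only attaches the top-level
+ # mount, so submounts such as the host's /dev/shm tmpfs do not follow into
+ # the chroot; a private tmpfs is therefore mounted on $root/dev/shm below so
+ # that programs relying on POSIX shared memory (shm_open) keep working inside.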
+ # required for ansible and firefox + df -a | grep -q $root/dev/shm || mount -t tmpfs -o noexec,size=5% tmpfs $root/dev/shm || error 12 + [ -e dev/pts/ptmx ] || \ + mount -t devpts -o rw,relatime,gid=5,mode=620,ptmxmode=000 devpts $root/dev/pts || error 13 + else + # https://wiki.gentoo.org/wiki/Chroot + [ -e dev/null ] || \ + { mount --rbind /dev $root/dev ; mount --make-rslave $root/dev ; } \ + || error 10 mount --rbind /dev $root/dev + [ -e proc/self ] || mount -t proc /proc $root/proc \ + || error 11 mount -t proc /proc + [ -e sys/block ] || \ + { mount --rbind /sys $root/sys ; mount --make-rslave $root/sys ; } \ + || error 12 --rbind /sys $root/sys + df -a | grep -q $root/dev/shm || \ + mount -t tmpfs -o noexec,size=5% tmpfs $root/dev/shm || error 14 $root/dev/shm + # https://wiki.gentoo.org/wiki/Project:X86/Chroot_Guide + [ -e dev/pts/ptmx ] || \ + mount -o bind /dev/pts $root/dev/pts || error 14 mount -o bind /dev/pts $root/dev/pts + fi + +# user +if [ -d $root/$HOME -a -f ~/.Xauthority ] ; then + cp ~/.Xauthority $root/$HOME + cp ~/.xauth* $root/$HOME + fi + +base=$( basename $root ) +[ -e ./start.rc ] || cat > ./start.rc << EOF +# env-update && . /etc/profile +export PS1='\${tty}\\u@${osl}${base}:\\W\\$ ' +EOF + +[ -z "$DISPLAY" ] || grep -q DISPLAY ./start.rc || \ + echo export DISPLAY=\"$DISPLAY\" >> ./start.rc + +# You'll also want to copy over resolv.conf in order to have proper DNS name +# resolution from inside the chroot: +cp -L /etc/resolv.conf etc || error 16 "Cant cp -L /etc/resolv.conf" + +EARGS="CHROOT=$root PATH=/usr/sbin:/usr/bin:/sbin:/bin" +#? set these to root or derive them? what about -l? +EELTS="$EELTS TERM DISPLAY HOME LANG LC_ALL" +[ -z "$LC_COLLATE" ] && EELTS="$EELTS LC_COLLATE" || EARGS="$EARGS LC_COLLATE=C" + +. /usr/local/bin/proxy_export.bash >/dev/null + +EELTS="$EELTS http_proxy https_proxy socks_proxy no_proxy" +for elt in $EELTS ; do + EARGS="$EARGS $( env|grep ^${elt}= )" + done + +[ -n "$BOX_DEBIAN10_VAR_APT_ARCHIVES" ] && \ + EARGS="$EARGS $BOX_DEBIAN10_VAR_APT_ARCHIVES=$BOX_DEBIAN10_VAR_APT_ARCHIVES" + +# mesg: ttyname failed: Success +tty=$( tty 2>/dev/null ) +[ $? -eq 0 -a -n "$tty" ] && EARGS="$EARGS TTY=$tty" + +# was /bin/bash -l +[ "$#" -eq 0 ] && set -- /bin/bash -i -l + +# Now you can chroot into your new system. Use env before chroot to ensure that no +# environment variables from the installation media are used by your new system: + +INFO capsh --caps="CAP_SYS_PTRACE+ep CAP_SYS_CHROOT+ep" --keep=1 -- /usr/sbin/chroot $LARGS $root /usr/bin/env -i $EARGS "$@" +echo >$root/tmp/$$.bash \ +capsh '--caps="CAP_SYS_PTRACE+ep CAP_SYS_CHROOT+ep"' --keep=1 -- /tmp/$$.sh +echo >$root/tmp/$$.sh \ + '`which env`' -i $EARGS "$@" +capsh --caps="CAP_SYS_PTRACE+ep CAP_SYS_CHROOT+ep" --keep=1 --chroot=$root -- /tmp/$$.bash +# --chroot=$root -c /usr/bin/env -- -i $EARGS "$@" +# exec chroot $LARGS $root /usr/bin/env -i $EARGS "$@" diff --git a/overlay/Linux/usr/local/sbin/base_chroot_unbind.bash b/overlay/Linux/usr/local/sbin/base_chroot_unbind.bash new file mode 100755 index 0000000..762385d --- /dev/null +++ b/overlay/Linux/usr/local/sbin/base_chroot_unbind.bash @@ -0,0 +1,42 @@ +#!/bin/sh +# -*- mode: sh; tab-width: 8; encoding: utf-8-unix -*- + +ROLE=base +prog=$( basename $0 .bash ) + +. 
/usr/local/bin/usr_local_tput.bash +error () { ERROR "$prog $2" ; exit $1 ; } + +# must be run as root +if [ "$( id -u )" != "0" ] ; then + echo ERROR: $0 run as root + exit 0 + fi +if [ "$#" -eq "0" ] ; then + error 2 "give an absolute directory name as argument" + fi +root=$1 +if [ ! -d "$1" ] ; then + error 3 "give an absolute directory name for chroot - $root" + fi + +mount | grep $root/ | while read a on elt rest ; do + umount $elt || { ERROR "unmounting $elt" ; exit 5 ; } + done + +mount | grep bind | while read a on elt rest ; do + umount $elt || { ERROR "unmounting $elt" ; exit 6 ; } + done + +umount -R $root + +lsof $root/usr 2>/dev/null \ + | sed -e 's@^[a-z]* *@@' -e 's@ .*@@' \ + | grep -v "$$\\|COMMAND" | sort -r -u | while read pid ; do + INFO "killing $pid" + kill $pid + sleep 10 +#? kill -9 $pid + done + +exit 0 diff --git a/overlay/Linux/usr/local/sbin/base_patch_from_diff.bash b/overlay/Linux/usr/local/sbin/base_patch_from_diff.bash new file mode 100755 index 0000000..1939304 --- /dev/null +++ b/overlay/Linux/usr/local/sbin/base_patch_from_diff.bash @@ -0,0 +1,44 @@ +#!/bin/bash +# -*- mode: sh; fill-column: 75; tab-width: 8; coding: utf-8-unix -*- + +prog=$( basename $0 .bash ) +ROLE=base +. /usr/local/bin/usr_local_tput.bash || exit 2 + +[ "$DEBUG" = 1 ] && patch=patch || patch=echo + +TODIR=/ +[ $# -eq 0 ] && set -- * + +INFO patching $@ in $PWD +find "$@" -name \*.diff |while read file ; do + echo $file + relf=$( echo $file | sed -e 's/^root//' ) + base=$( echo $relf | sed -e 's/.diff$//' ) + dest="${TODIR}$base" + + if [ ! -f $dest ] && head -1 $file | grep -q /dev/null ; then + cp /dev/null $dest + $patch -b -z .dst $dest < $file + continue + fi + if [ ! -f $dest ] ; then + WARN BAD PATCH file missing dest=$dest for patch $file + continue + fi + if [ -f $dest.dst ] ; then + [ $dest -nt $file ] && DBUG $dest.dst done || WARN $dest -nt $PWD/$file + continue + fi + + $patch -b -z .dst $dest < $file 2>$base.err + retval=$? + if [ $? -eq 0 ] ; then + INFO patched $file + else + WARN patch ERROR $file `cat $base.err` + [ -s $base.err ] || rm -f $base.err + fi + [ -f $dest.rej ] && WARN $dest.rej exists +done +exit 0 diff --git a/overlay/Linux/usr/local/sbin/base_shutdown.bash b/overlay/Linux/usr/local/sbin/base_shutdown.bash new file mode 100755 index 0000000..90aafb6 --- /dev/null +++ b/overlay/Linux/usr/local/sbin/base_shutdown.bash @@ -0,0 +1,69 @@ +#!/bin/sh +# -*- mode: sh; fill-column: 75; tab-width: 8; coding: utf-8-unix -*- + +prog=$( basename $0 .bash ) +. /usr/local/bin/usr_local_tput.bash +ROLE=base +. /usr/local/bin/usr_local_base.bash || exit 2 + +. ~/.bash_logout + +# these can hang unmounting partitions +pkill dirmngr +pkill bootlogd + +[ -x /var/local/bin/privacy_home_cleaner.bash ] && /var/local/bin/privacy_home_cleaner.bash + +[ -f ~/Makefile ] && grep -q ^stop: ~/Makefile && \ + { cd ~ ; make stop || exit 2 ; } + +a=`virsh list | wc -l` +[ $? -eq 0 -a -n "$a" -a "$a" -gt 0 ] && proxy_whonix_host.bash stop + +local_base_umount () { + local mount + cd /mnt +mount=`mount` +for file in linux* ; do + echo $mount | grep -q " on /mnt/$file " || continue + echo /mnt/$file + umount -R /mnt/$file || exit 1 + done + +# not l - a b f d n u x i j k o q w e h z +for file in ? 
; do + echo $mount | grep -q " on /mnt/$file " || continue + # echo /mnt/$file + umount /mnt/$file || echo WARN: $prog error umounting /mnt/$file + done + umount -a +} + +local_base_umount # || exit 3 + +# should be 0 +NUM=`losetup -a |grep -c -v home` +if [ $NUM -gt 0 ] ; then + losetup -a |grep -v home + echo losetup still mounted + exit 5 + fi + +sleep 10 +umount -a -t ntfs-3g + +# should be 1 +NUM=`ps ax | grep mount.ntfs-3g | grep -v grep | wc -l` +if [ $NUM -ge 1 ] ; then + ps ax | grep mount.ntfs-3g | grep -v grep + echo ERROR: mount.ntfs-3g still running + exit 6 + fi + +INFO Calling shutdown + +if [ $# -lt 1 ] ; then + shutdown -r now + else + shutdown $* + fi diff --git a/overlay/Linux/usr/local/sbin/bootstrap_chroot_kicksecure.bash b/overlay/Linux/usr/local/sbin/bootstrap_chroot_kicksecure.bash new file mode 100755 index 0000000..25dd12a --- /dev/null +++ b/overlay/Linux/usr/local/sbin/bootstrap_chroot_kicksecure.bash @@ -0,0 +1,88 @@ +#!/bin/sh +# -*- mode: sh; tab-width: 8; encoding: utf-8-unix -*- +set -e + +prog=$( basename $0 .bash ) +. /usr/local/bin/usr_local_tput.bash +ROLE=base + +. /usr/local/bin/usr_local_tput.bash + +error () { ERROR "$0 $2" ; exit $1 ; } + +VERS=15.0.1.5.4 +TYPE=XFCE +HTTP_DIR=/g/Privacy/net/Http +URL=download.whonix.org/ova/$VERS/Kicksecure-${TYPE}-$VERS +TMPDIR=URL=$HTTP_DIR/download.whonix.org/ova/ + +NBD_DEV=/dev/nbd1 + +if [ ! -f $HTTP_DIR/$URL.ova ] ; then + wget -xcP $HTTP_DIR/ https://$URL.ova || error 2 wget +fi + +[ -d $TMPDIR ] || mkdir -p $TMPDIR || error 3 $TMPDIR +cd $TMPDIR || error 4 cd $TMPDIR + +if [ ! -f Kicksecure-${TYPE}-$VERS-disk001.vmdk ] ; then + echo INFO: $HTTP_DIR/$URL.ova + tar xvf $HTTP_DIR/$URL.ova || error 4 tar +fi + +if [ ! -f Kicksecure-${TYPE}-${VERS}-disk001.qcow2 ] ; then + echo INFO: Kicksecure-${TYPE}-$VERS-disk001.qcow2 + qemu-img convert -O qcow2 Kicksecure-${TYPE}-$VERS-disk001.vmdk Kicksecure-${TYPE}-$VERS-disk001.qcow2 +fi + + +# must be run as root +if [ "$( id -u )" != "0" ] ; then + echo ERROR: $0 run as root + exit 0 + fi +if [ "$#" -eq "0" ] ; then + root=/mnt/qcow2/KickXFCE150154 + else + root=$1 + fi +[ -d "$root" ] || mkdir $root +if [ ! -d "$root" ] ; then + error 3 "give an absolute directory name for chroot - $root" + fi + +if [ ! -e ${NBD_DEV}p1 ] ; then + echo INFO: qemu-nbd -c ${NBD_DEV} Kicksecure-${TYPE}-$VERS-disk001.qcow2 + qemu-nbd -c ${NBD_DEV} Kicksecure-${TYPE}-${VERS}-disk001.qcow2 +fi + +fdisk -l ${NBD_DEV} | grep ${NBD_DEV}p1 || exit 6 + +df | grep " $root" || mount ${NBD_DEV}p1 $root + +[ -d /usr/local/tmp/wheels ] || \ + ( cd /usr/local/tmp ; bash /usr/local/sbin/bootstrap_wheels.bash ; ) + +[ -d $root/usr/local/tmp ] || \ + { mkdir $root/usr/local/tmp ; chmod 1777 $root/usr/local/tmp ; } +[ -d $root/usr/local/tmp/wheels ] || \ + cp -rip /usr/local/tmp/wheels $root/usr/local/tmp/wheels +[ -d $root/usr/local/sbin ] || \ + { mkdir $root/usr/local/sbin ; } +[ -f $root/usr/local/sbin/bootstrap_pip_ansible.bash ] || \ + { cp -p /usr/local/sbin/bootstrap_*.bash $root/usr/local/sbin ; } +[ -d $root/usr/local/etc/ssl ] || \ + { mkdir $root/usr/local/etc/ssl ; } +[ -f /usr/local/etc/ssl/cacert-testforge.pem -a \ + ! -f $root//usr/local/etc/ssl/cacert-testforge.pem ] && \ + cp -p /usr/local/etc/ssl/cacert-testforge.pem $root/usr/local/etc/ssl/cacert-testforge.pem + +. 
/usr/local/bin/proxy_export.bash +echo INFO: /usr/local/sbin/update_chroot.bash $root +echo BOX_DEBIAN10_VAR_APT_ARCHIVES=/mnt/o/Cache/Apt/Debian/10.6/var/cache/apt/archives +echo BOX_BOXUSER_PLAY_PIP_CACHE=/mnt/o/Cache/Pip +echo BOX_USER_NAME=user +echo export http_proxy=$http_proxy +echo export https_proxy=$https_proxy +echo export socks_proxy=$socks_proxy +echo /usr/local/sbin/bootstrap_pip_ansible.bash diff --git a/overlay/Linux/usr/local/sbin/bootstrap_pentoo_virtualbox_guest.bash b/overlay/Linux/usr/local/sbin/bootstrap_pentoo_virtualbox_guest.bash new file mode 100755 index 0000000..947e3f0 --- /dev/null +++ b/overlay/Linux/usr/local/sbin/bootstrap_pentoo_virtualbox_guest.bash @@ -0,0 +1,56 @@ +#!/bin/sh +# -*-mode: sh; tab-width: 8; coding: utf-8-unix -*- + +ROLE=hostvms +export LANG=en_US.UTF-8 +kernel=5.0.8-pentoo +hostname=pentoo + +cd /lib/modules/$kernel +# These interfere with installing virtualbox-guest-additions requried for vboxsf +[ -f kernel/drivers/staging/vboxvideo/vboxvideo.ko.xz ] && \ + mv kernel/drivers/staging/vboxvideo/vboxvideo.ko.xz kernel/drivers/staging/vboxvideo/vboxvideo.ko.xz.dst +[ -f kernel/drivers/virt/vboxguest/vboxguest.ko.xz ] && \ + mv kernel/drivers/virt/vboxguest/vboxguest.ko.xz kernel/drivers/virt/vboxguest/vboxguest.ko.xz.dst + depmod -a 5.0.8-pentoo + +cd /etc/modprobe.d/ +if [ ! -f blacklist.conf.dst ] ; then + mv blacklist.conf blacklist.conf.dst + cp blacklist.conf.dst blacklist.conf +fi +# maybe not all are needed +for elt in drm vbox video ttm ; do + grep "blacklist $elt" blacklist.conf || \ + echo "blacklist $elt" >> blacklist.conf +done + +cd /etc/ssh/ +if [ ! -f sshd_config.dst ] ; then + mv sshd_config sshd_config.dst + cp sshd_config.dst sshd_config + fi + +#FixMe: nano sshd_config +rc-update add NetworkManager +rc-update add sshd default + +cd /root/ +date_slash=$( date +%Y/%m/%d ) +[ -d var/tmp/$hostname/$date_slash ] || mkdir -p var/tmp/$hostname/$date_slash +cd var/tmp/Pentoo/$date_slash + +eix brltty | grep -q Installed && \ + emerge -C brltty>emerge-C_brltty.log 2>&1 + +if [ ! /etc/portage/make.conf.dst ] ; then + mv /etc/portage/make.conf /etc/portage/make.conf.dst + cp /etc/portage/make.conf.dst /etc/portage/make.conf + fi +# FixMe: nano /etc/portage/make.conf + +emerge -fp =app-emulation/virtualbox-guest-additions-6.0.6>virtualbox-guest-additions-6.0.6.lis 2>&1 +# get the files... +emerge -vb =app-emulation/virtualbox-guest-additions-6.0.6>virtualbox-guest-additions-6.0.6.log 2>&1 + +rc-update add virtualbox-guest-additions diff --git a/overlay/Linux/usr/local/sbin/bootstrap_pip_ansible.bash b/overlay/Linux/usr/local/sbin/bootstrap_pip_ansible.bash new file mode 100755 index 0000000..d04a6bf --- /dev/null +++ b/overlay/Linux/usr/local/sbin/bootstrap_pip_ansible.bash @@ -0,0 +1,509 @@ +#!/bin/bash -e +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- + +prog=$( basename $0 .bash ) +PREFIX=/usr/local +ROLE=base + +shopt -o -s pipefail +DEBUG=1 +. /usr/local/bin/usr_local_tput.bash + +[ $( id -u ) -eq 0 ] || { ERROR "this must be run as root" ; exit 1 ; } + +. 
/usr/local/bin/proxy_export.bash + +WD=$PWD +MV=mv +COPY="ln -s" + +PYVER=3 +PYTHON_MINOR=$( python$PYVER --version 2>&1| sed -e 's@^.* @@' -e 's@\.[0-9]*$@@' ) +[ -z "$BASE_PYTHON2_MINOR" ] && \ + BASE_PYTHON2_MINOR=$( python2 --version 2>&1| sed -e 's@^.* @@' -e 's@\.[0-9]*$@@' ) +[ -z "$BASE_PYTHON3_MINOR" ] && \ + BASE_PYTHON3_MINOR=$( python3 --version 2>&1| sed -e 's@^.* @@' -e 's@\.[0-9]*$@@' ) + +if [ -z "$LIB" -a -d /usr/lib/python$PYTHON_MINOR/site-packages ] ; then + LIB=lib +elif [ -z "$LIB" -a -d /usr/lib64/python$PYTHON_MINOR/site-packages ] ; then + LIB=lib64 +elif [ -z "$LIB" -a -d /usr/lib/python$PYTHON_MINOR/dist-packages ] ; then + LIB=lib + mkdir -p /usr/local/lib/python$PYTHON_MINOR/site-packages + ln -s /usr/local/lib/python$PYTHON_MINOR/dist-packages \ + /usr/local/lib/python$PYTHON_MINOR/site-packages +elif [ -z "$LIB" -a -d /usr/lib/python$PYTHON_MINOR/dist-packages ] ; then + LIB=lib64 + mkdir -p /usr/local/lib64/python$PYTHON_MINOR/site-packages + ln -s /usr/local/lib64/python$PYTHON_MINOR/dist-packages \ + /usr/local/lib64/python$PYTHON_MINOR/site-packages +elif [ -z "$LIB" -a -d /usr/lib/python$PYVER/dist-packages ] ; then + LIB=lib + mkdir -p /usr/local/lib/python$PYTHON_MINOR/site-packages + ln -s /usr/local/lib/python$PYTHON_MINOR/dist-packages \ + /usr/local/lib/python$PYTHON_MINOR/site-packages +elif [ -z "$LIB" -a -d /usr/lib/python$PYVER/dist-packages ] ; then + LIB=lib64 + mkdir -p /usr/local/lib64/python$PYTHON_MINOR/site-packages + ln -s /usr/local/lib64/python$PYTHON_MINOR/dist-packages \ + /usr/local/lib64/python$PYTHON_MINOR/site-packages + fi + +if [ -z "$LIB" ] ; then + ERROR LIB=$LIB empty - no /usr/lib*/python$PYTHON_MINOR/site-packages + exit 3 +elif [ -n "$LIB" -a ! -d /usr/$LIB/python$PYTHON_MINOR/site-packages ] ; then + ERROR LIB=$LIB but no /usr/$LIB/python$PYTHON_MINOR/site-packages + exit 3 +fi +INFO LIB=$LIB /usr/$LIB/python$PYTHON_MINOR/site-packages + +[ -z "$UPTMP" ] && UPTMP=$PREFIX/tmp +# With packer the files we need are not on the host - they are pushed up and $UPTMP is populated with: +PDIRS="authorized_keys archives boxuser_pip_cache root_pip_cache cacert.pem wheels" +# With vagrant the files may have been tarred on the host and be in their cannonical positions. +# We symlink to files under vagrant to /tmp to leave the packer scripts untouched. +# With packer and docker we can remote mount partitions and not even copy them up to the guest. + +[ -n "$TESTF_DEBIAN10_VAR_APT_ARCHIVES" ] && [ -d "$TESTF_DEBIAN10_VAR_APT_ARCHIVES/" ] && \ + [ ! -e $UPTMP/archives ] && ln -s $TESTF_DEBIAN10_VAR_APT_ARCHIVES/ $UPTMP/archives + ln -s $TESTF_DEBIAN10_VAR_APT_ARCHIVES/*.deb /var/cache/apt/archives 2>/dev/null +[ -n "$HOSTVMS_BOXUSER_PLAY_PIP_CACHE" ] && [ -e "$HOSTVMS_BOXUSER_PLAY_PIP_CACHE" ] && \ + [ ! -e $UPTMP/boxuser_pip_cache ] && ln -s $HOSTVMS_BOXUSER_PLAY_PIP_CACHE/ $UPTMP/boxuser_pip_cache +[ -n "$HOSTVMS_ROOT_PLAY_PIP_CACHE" ] && [ -d "$HOSTVMS_ROOT_PLAY_PIP_CACHE/" ] && \ + [ ! -e $UPTMP/root_pip_cache ] && ln -s "$HOSTVMS_ROOT_PLAY_PIP_CACHE/" $UPTMP/root_pip_cache + +[ -d /usr/local/etc/testforge ] || mkdir -p /usr/local/etc/testforge +export PLAY_PIP_CERT="/usr/local/etc/ssl/cacert-testforge.pem" +[ -f $PLAY_PIP_CERT ] && \ + [ ! 
-e $UPTMP/cacert.pem ] && ln -s $PLAY_PIP_CERT $UPTMP/cacert.pem + +# config_file = os.environ.get('PIP_CONFIG_FILE', None) +# /usr/$LIB/python2.7/site-packages/pip/_internal/configuration.py + +bootstrap_mkdir () { mkdir -p $1 ; chgrp $BOX_ALSO_GROUP $1 ; } +[ -d /usr/local/tmp ] || { mkdir -p /usr/local/tmp ; chmod 1777 /usr/local/tmp ; } + +site_packages=$PREFIX/$LIB/python$PYTHON_MINOR/site-packages +[ -d $site_packages ] || bootstrap_mkdir $site_packages +[ -f $site_packages/__init__.py ] || touch $site_packages/__init__.py +if [ ! -d /usr/local/tmp/wheels ] ; then + cd /usr/local + sh sbin/bootstrap_wheels.bash || exit 4 +fi +[ ! -d $UPTMP/wheels/ ] && [ $UPTMP/ != /usr/local/tmp/ ] && \ + ln -s /usr/local/tmp/wheels $UPTMP/wheels + +# But with vagrant or docker we may have mounted the HOST partitions that contain the files +# [ -z "$TESTF_UBUNTU16_VAR_APT_ARCHIVES" ] && TESTF_UBUNTU16_VAR_APT_ARCHIVES -> $UPTMP/archives + +[ -d /etc/portage -a -z "$BOX_USER_NAME" ] && BOX_USER_NAME=vagrant +[ -d /etc/apt -a -z "$BOX_USER_NAME" ] && BOX_USER_NAME=devuan +[ -z "$BOX_USER_HOME" ] && BOX_USER_HOME=/home/$BOX_USER_NAME +[ -z "$BOX_ALSO_GROUP" ] && BOX_ALSO_GROUP=adm + +[ -z "$LOGDIR" ] && LOGDIR=$PREFIX/tmp +[ -d $LOGDIR ] || { mkdir $LOGDIR ; chmod 1777 $LOGDIR ; } + +# not needed: --no-binary :all: --upgrade-strategy only-if-needed +# not yet: --user +PIP_ARGS="" +PIP_INSTALL_ARGS="--disable-pip-version-check --prefix=$PREFIX" +scripts="ansible ansible-playbook ansible-pull ansible-doc ansible-galaxy ansible-console ansible-connection ansible-vault" + +[ -d /etc/apt ] && export DEBIAN_FRONTEND=noninteractive +export PIP_DEFAULT_TIMEOUT=60 + +ANSIBLE_VER="2.9.10" +#2? PYYAML_VER="3.12" +ansible_tgz=ansible-$ANSIBLE_VER.tar.gz +#2? yaml_tgz=PyYAML-$PYYAML_VER.tar.gz + +if [ -n "$BOX_USER_NAME" ] ; then + # Packer will not have created this and we will need it early. + [ -d $BOX_USER_HOME ] || \ + bootstrap_mkdir $BOX_USER_HOME + #? useradd -d $BOX_USER_HOME -G root -m $BOX_USER_NAME + + # If you want to use your own private key for packer + [ -d $BOX_USER_HOME/.ssh ] || \ + bootstrap_mkdir $BOX_USER_HOME/.ssh + + if [ -f $UPTMP/authorized_keys ] ; then + $COPY $UPTMP/authorized_keys $BOX_USER_HOME/.ssh && \ + chmod 600 $BOX_USER_HOME/.ssh/authorized_keys + fi + chmod 700 $BOX_USER_HOME/.ssh/ +fi + +[ -d /etc/apt -a -d /var/cache/apt/archives ] || mkdir -p /var/cache/apt/archives +# If you upload your cache of Ubuntu .debs, it cuts down on the downloading +[ -d $UPTMP/archives ] && \ + $COPY $UPTMP/archives/*.deb /var/cache/apt/archives 2>/dev/null +# leave this for cleanup: +# rm -rf $UPTMP/archives + +# If you upload your cache of pip files, it cuts down on the downloading +if [ -d $UPTMP/boxuser_pip_cache ] ; then + bootstrap_mkdir $BOX_USER_HOME/.cache/ && \ + cp -rip $UPTMP/boxuser_pip_cache $BOX_USER_HOME/.cache/pip && \ + chown -R ${BOX_USER_NAME}.{BOX_ALSO_GROUP} $BOX_USER_HOME/.cache/pip && \ + chmod -R g+rw $BOX_USER_HOME/.cache/pip && \ + chmod -R o-w $BOX_USER_HOME/.cache/pip +fi +if [ -d $UPTMP/root_pip_cache ] ; then + bootstrap_mkdir /root/.cache/ && \ + cp -rip $UPTMP/root_pip_cache /root/.cache/pip && \ + chown -R root.root /root/.cache/pip && \ + chmod -R g+rw /root/.cache/pip && \ + chmod -R o-w /root/.cache/pip +fi + +if [ -d /etc/apt ] ; then + if ! route | grep -q ^default ; then + DBUG "Not connected; skipping apt-get update" + elif [ ! -f /var/log/dpkg.log ] ; then + apt-get update # || exit 4 + fi + which unzip || ! 
[ -f /var/cache/apt/archives/unzip_*_amd64.deb ] || \ + dpkg -i /var/cache/apt/archives/unzip_*_amd64.deb + which curl || [ ! -f /var/cache/apt/archives/curl_*_amd64.deb ] || \ + dpkg -i /var/cache/apt/archives/curl_*_amd64.deb \ + /var/cache/apt/archives/libcurl4_*_amd64.deb \ + /var/cache/apt/archives/libcurl4-openssl-dev_*_amd64.deb + apt-get install -y --force-yes wget unzip openssl || true + [ -f /usr/include/Python.h ] || \ + apt-get install -y --force-yes \ + libffi-dev libssl-dev python3-dev python3-pycparser \ + python3-coverage || \ + echo WARN you must run apt-get update + # msg: Could not find `coverage` module. ?python3-apt ? + +elif [ -d /etc/portage ] ; then + # FixMe: put these in wheels? + [ -x /usr/bin/unzip ] || which unzip 2>/dev/null || emerge -vb app-arch/unzip + [ -x /usr/bin/wget ] || which wget 2>/dev/null || emerge -vb net-misc/wget + which openssl 2>/dev/null || timeout 600 emerge -vb dev-libs/openssl + # openssl installs: + # dev-python/pyopenssl-19.1.0 + # dev-python/six-1.13.0 + # dev-python/cryptography-2.8 + # dev-python/cffi-1.12.3:0/1.12.3 + # dev-python/pycparser-2.19-r1 + # dev-python/ply-3.11:0/3.11 + # virtual/python-ipaddress-1.0-r1 + # dev-python/ipaddress-1.0.23 + # virtual/python-enum34-2 + # dev-python/enum34-1.1.6-r1 + python$PYVER -c 'import OpenSSL' 2>/dev/null || \ + timeout 600 emerge -vb dev-python/pyopenssl + python$PYVER -c 'import pycparser' 2>/dev/null || \ + timeout 600 emerge -vb dev-python/pycparser + python$PYVER -c 'import yaml' 2>/dev/null || \ + timeout 600 emerge -vb dev-python/pyyaml + DBUG "Gentoo Installed openssl and wget" +fi + +# On a CORP laptop off the VPN we may need some CAs +[ -d $PREFIX/etc/ssl ] || mkdir -p $PREFIX/etc/ssl +[ ! -f $PLAY_PIP_CERT ] && \ + [ -f $UPTMP/cacert.pem ] && \ + $COPY $UPTMP/cacert.pem $PLAY_PIP_CERT + +# pip gets confused +# or just delete $PREFIX/$LIB/python$PYTHON_MINOR/dist-packages afterwards + + PYTHON_MINOR=$( python$PYVER --version 2>&1| sed -e 's@^.* @@' -e 's@\.[0-9]*$@@' ) + + site_packages=$PREFIX/$LIB/python$PYTHON_MINOR/site-packages + [ -d $site_packages ] || bootstrap_mkdir $site_packages + [ -f $site_packages/__init__.py ] || touch $site_packages/__init__.py + if [ -d /etc/apt ] ; then + dist_packages=$PREFIX/lib/python$PYTHON_MINOR/dist-packages + WD=$PWD + if [ -d $dist_packages ] ; then + cd $PREFIX/lib/python$PYTHON_MINOR + ln -s $site_packages . + cd $WD + fi + fi + + # we will use $PREFIX/bin/python3.bash NOT $PREFIX/bin/python3.sh + # to not conflict with what Ansible will push later/before. + if [ ! -e $PREFIX/bin/python$PYVER.bash ] ; then + INFO "bootstrapping $PREFIX/bin/python$PYVER.bash" + cat > $PREFIX/bin/python$PYVER.bash << EOF +#!/bin/sh +# -*-mode: sh; tab-width: 8; coding: utf-8-unix -*- +# from bootstrap_pip_ansible.bash +. /usr/local/bin/usr_local_tput.bash || exit 2 +PREFIX=/usr/local + +# pip gets confused +dist_packages=$site_packages +dist_packages=\$dist_packages:\${dist_packages}/pip/_vendor +if [ -z "$PYTHONPATH" ] ; then + export PYTHONPATH=\$dist_packages +else + export PYTHONPATH=\$PYTHONPATH:\$dist_packages +fi + +exec python$PYVER "\$@" +EOF + chmod 755 $PREFIX/bin/python$PYVER.bash + + fi + + # pip may be loaded in the base iso + if [ -x $PREFIX/bin/python$PYVER.bash ] && \ + $PREFIX/bin/python$PYVER.bash -c 'import pip' 2>/dev/null ; then + INFO pip$VER already installed + elif [ ! 
-d $UPTMP/wheels/ ] ; then + WARN $UPTMP/wheels not found + else + # we may be without the VPN/proxy but on a corporate laptop + # with a hosed chain of Certificate Authorities for the MITM proxy + # in which case http://bootstrap.pypa.io/get-pip.py will not work, + # so effective but groddy: + # just unzip the wheels into site-packages and force-reinstall later + cd $UPTMP/wheels/ + + INFO "installing pip - unzipping wheels into $site_packages" + for file in *.whl ; do + #a=$( echo $file | sed -e 's/-.*//' ) + #b=$( basename $a|sed -e 's/Py//'|tr '[A-Z]' '[a-z]' ) + #python$PYVER -c "import $b" 2>/dev/null >/dev/null && continue + unzip -n $file -d $site_packages >/dev/null + done + + # morons + # -rwx------ 1 root root 8866 Jun 11 2018 /usr/local/$LIB/python$PYTHON_MINOR/site-packages/idna-2.7.dist-info/METADATA + find $site_packages -type d -exec chmod a+rx '{}' \; + find $site_packages -type f -exec chmod a+r '{}' \; + chgrp -R "$BOX_ALSO_GROUP" $site_packages + + # hack in a PYTHONPATH for our unzipped wheels - removed later + for elt in pip ; do # is wheel needed? + INFO "Installing $elt" + # use $PYVER.bash for bootstrap - $PYVER.bash will come later + [ -f $PREFIX/bin/$elt$PYVER.bash ] || \ + cat > $PREFIX/bin/$elt$PYVER.bash << EOF +#!/bin/sh +# -*-mode: sh; tab-width: 8; coding: utf-8-unix -*- +export PLAY_PIP_CERT=$PIP_CERT +export PYTHONPATH=${site_packages} +export PYTHONPATH=\$PYTHONPATH:${site_packages}/pip/_vendor +#? FixMe: narrow to InsecurePlatformWarning +python$PYVER -W ignore -m $elt "\$@" +EOF + chmod 755 $PREFIX/bin/$elt$PYVER.bash + $PREFIX/bin/$elt$PYVER.bash --help >/dev/null + DBUG "Installed $elt$PYVER.bash" + done + fi + + # do I still need this + #if [ -x $PREFIX/bin/pip$PYVER ] && [ -d $site_packages ] ; then + # export PYTHONPATH=$site_packages:$site_packages/pip/_vendor + #fi + + if [ ! -x $PREFIX/bin/pip$PYVER.sh ] ; then + ERROR "Failed to Install pip$PYVER at $PREFIX/bin/pip$PYVER.sh" + exit 3 + elif ! $PREFIX/bin/python$PYVER.bash -m pip -V ; then + ERROR "Failed to run pip$PYVER at $PREFIX/bin/pip$PYVER" + exit 4 + fi + + if [ -f $PLAY_PIP_CERT ] ; then + if [ ! -f $site_packages/pip/_vendor/requests/cacert.pem.dst ] && \ + [ -f $site_packages/pip/_vendor/requests/cacert.pem ] && \ + [ ! -h $site_packages/pip/_vendor/requests/cacert.pem ] ; then + mv $site_packages/pip/_vendor/requests/cacert.pem \ + $site_packages/pip/_vendor/requests/cacert.pem.dst + fi + if [ ! -h $site_packages/pip/_vendor/requests/cacert.pem ] ; then + rm -f $site_packages/pip/_vendor/requests/cacert.pem + fi + [ -e $site_packages/pip/_vendor/requests/cacert.pem ] || \ + ln -s $PLAY_PIP_CERT $site_packages/pip/_vendor/requests/cacert.pem + INFO linked $PLAY_PIP_CERT $site_packages/pip/_vendor/requests/cacert.pem + fi + +# dont use -CAfile $UPTMP/cacert.pem - we want it to fail if we need the cert +if ! 
route | grep -q ^default ; then + DBUG "Not connected; skipping SSL Certificate Authority chain" +elif [ -n "$https_proxy" ] ; then + proxy=`echo "$https_proxy" | sed -e 's/https*:\/*//'` + openssl s_client -connect pypi.org:443 --proxy $proxy > $LOGDIR/pip_install_pip_ansible.log 2>&1 || { + ERROR pip$PYVER.sh $PIP_ARGS install $PIP_INSTALL_ARGS $ansible_tgz + tail $LOGDIR/pip_install_pip_ansible.log + exit 8 + } + return 0 +} + +boostrap_patch_ansible () { + local WD=$PWD + + [ -d /usr/local/patches/base ] || return 0 + [ -f /usr/local/sbin/base_patch_from_diff.bash ] || return 0 + + cd /usr/local/patches/base || return 1 + [ -d usr/local/src/ansible-$ANSIBLE_VER ] || return 0 + + # this vacuumns all diff files below the root + /usr/local/sbin/base_patch_from_diff.bash usr/local/src/ansible-$ANSIBLE_VER + + return 0 +} + +boostrap_setup_ansible () { + local WD=$PWD + + cd /usr/local/src + [ -d ansible-$ANSIBLE_VER ] || tar xfz $UPTMP/wheels/$ansible_tgz + cd ansible-$ANSIBLE_VER + + /usr/local/sbin/base_patch_from_diff.bash usr/local/src/ansible-$ANSIBLE_VER + + RARGS=" --user $RARGS" + # RARGS=" --install-layout=unix $RARGS" + export PYTHONPATH=/usr/local/$LIB/python$PYTHON_MINOR/site-packages + DBUG "/usr/local/bin/python$PYVER.bash setup.py install $RARGS" + sudo -u $"$BOX_USER_NAME" \ + /usr/local/bin/python$PYVER.bash setup.py install $RARGS \ + >> install.log + retval=$? + cd $WD + return $retval +} + +# NOW we use our fresh pip to install ansible from source, into /usr/local +if [ -d $PREFIX/src/ansible-$ANSIBLE_VER ] ; then + INFO already installed $PREFIX/src/ansible-$ANSIBLE_VER + elif [ ! -f $UPTMP/wheels/$ansible_tgz ] ; then + ERROR tgz missing $UPTMP/wheels/$ansible_tgz + exit 7 + else + if false ; then + boostrap_pip_ansible + else + boostrap_setup_ansible + [ $? -eq 0 ] || { ERROR installing ansible ; tail install.log ; exit 8 ; } + fi + boostrap_patch_ansible + + if [ -d /etc/portage/ ] ; then + [ -d /etc/portage/profile ] || mkdir /etc/portage/profile + grep -q app-admin/ansible-$ANSIBLE_VER /etc/portage/profile/package.provided || \ + echo app-admin/ansible-$ANSIBLE_VER >> /etc/portage/profile/package.provided + fi + + cd $PREFIX/bin + [ -e ansible-doc ] || { ERROR installing ansible-doc ; exit 9 ; } + grep "#\!.$PREFIX/bin/python$PYVER.bash" ansible-doc || \ + sed -e "s@^#\!.*python.*@#\!${PREFIX}/bin/python$PYVER.bash@" -i $scripts +fi + +ansible --version || exit 10 + +if [ -f $PLAY_PIP_CERT ] ; then + export PLAY_PIP_CERT=$PIP_CERT + PIP_INSTALL_ARGS="$PIP_INSTALL_ARGS --cert $PLAY_PIP_CERT" + else + WARN "PLAY_PIP_CERT not found $PIP_CERT" + fi + +[ ! -f /etc/wgetrc ] || sh $WD/bootstrap_proxy.bash + +# pip uses curl - and has a config file PIP_CONFIG +DBUG "http_proxy=$http_proxy https_proxy=$https_proxy" +if [ -n "$https_proxy" ] ; then + INFO "Adding to PIP_INSTALL_ARGS --proxy=$https_proxy" + elif [ -f /etc/wgetrc ] && grep ^http_proxy /etc/wgetrc ; then + proxy=$( grep ^http_proxy /etc/wgetrc|sed -e 's@.*=@--proxy=@' ) + INFO "Adding to PIP_INSTALL_ARGS $proxy" + PIP_INSTALL_ARGS="$PIP_INSTALL_ARGS $proxy" +fi + +cd $PREFIX/src +# install pycurl as a test of pip and a requisite for proxyauth.py +if ! $PREFIX/bin/python$PYVER.bash -c 'import curl' 2>/dev/null ; then + if [ -d /etc/apt ] ; then + apt-get install -y --force-yes libcurl4-openssl-dev \ + 2>&1 | tee $LOGDIR/apt-get_install_libcurl4-openssl-dev.log + elif [ -d /etc/portage ] ; then + [ -x /usr/bin/curl ] || which curl 2>/dev/null || emerge -vb curl + fi + #? --allow-unverified pycurl + if ! 
route | grep -q ^default ; then + INFO "Not connected; not installing pycurl" + elif $PREFIX/bin/pip$PYVER.sh install $PIP_INSTALL_ARGS pycurl >> $LOGDIR/pip_install_pycurl.log 2>&1 ; then + INFO "Installed pycurl from pip with $PREFIX/bin/pip install $PIP_INSTALL_ARGS" + # We dont fail the packer build if it errors - just fix it and rerun + $PREFIX/bin/python$PYVER.bash -c 'import curl; print curl.__file__' || true + else + WARN "Installing pycurl failed with $PREFIX/bin/pip install $PIP_INSTALL_ARGS" + cat $LOGDIR/pip_install_pycurl.log + fi + fi + +[ -e /usr/local/bin/python$PYVER.sh ] || \ + [ -h /usr/local/bin/python$PYVER.sh ] || \ + ln -s /usr/local/bin/python$PYVER.bash /usr/local/bin/python$PYVER.sh + +find /usr/local/$LIB/python$PYTHON_MINOR/site-packages/ansible/modules/ -name \*.py \ + -exec grep -q /usr/bin/python '{}' \; -print \ + -exec sed -e "1,3s@#!/usr/bin/python@#!/usr/local/bin/python$PYVER.bash@" -i '{}' \; + +exit 0 diff --git a/overlay/Linux/usr/local/sbin/bootstrap_proxy.bash b/overlay/Linux/usr/local/sbin/bootstrap_proxy.bash new file mode 100755 index 0000000..79aac29 --- /dev/null +++ b/overlay/Linux/usr/local/sbin/bootstrap_proxy.bash @@ -0,0 +1,61 @@ +#!/bin/bash +# -*-mode: sh; tab-width: 8; coding: utf-8-unix -*- + +set -e +shopt -o -s pipefail + +prog=$( basename $0 .bash ) +ROLE=base +. /usr/local/bin/usr_local_tput.bash + +[ -z "$UPTMP"] && UPTMP=/usr/local/tmp + +# [ $( id -u ) -eq 0 ] || { ERROR "this must be run as root" ; exit 1 ; } + +# consider PIP_CONFIG_FILE [defaults] ini +export PLAY_PIP_CERT="/usr/local/etc/ssl/cacert-testforge.pem" + +if [ -n "$http_proxy" ] || [ -n "$https_proxy" ] ; then + INFO "proxy.sh YES http_proxy=$http_proxy https_proxy=$https_proxy" + if [ -d /etc/portage ] ; then + grep ^http_proxy /etc/portage/make.conf || \ + cat >> /etc/portage/make.conf << EOF +# BEGIN ANSIBLE MANAGED BLOCK proxy +http_proxy="$http_proxy" +https_proxy="$https_proxy" +# END ANSIBLE MANAGED BLOCK proxy +EOF + elif [ ! -f /etc/apt/apt.conf.d/80proxy.conf ] || ! grep -q Proxy /etc/apt/apt.conf.d/80proxy.conf ; then \ + cat > /etc/apt/apt.conf.d/80proxy.conf << EOF +# BEGIN ANSIBLE MANAGED BLOCK proxy +Acquire::http::Proxy "$http_proxy"; +Acquire::https::Proxy "$https_proxy"; +# END ANSIBLE MANAGED BLOCK proxy +EOF + fi + + # FixMe: should be able to remove check_certificate = off now + [ -z "$no_proxy" ] && no_proxy=localhost,127.0.0.1 + if [ ! 
-f /etc/wgetrc ] || grep -q "^http_proxy=$http_proxy" /etc/wgetrc ; then + cat >> /etc/wgetrc << EOF +# BEGIN ANSIBLE MANAGED BLOCK proxy +http_proxy=$http_proxy +https_proxy=$https_proxy +no_proxy=$no_proxy +ca-certificate=$PLAY_PIP_CERT +check_certificate = on +quiet = on +# END ANSIBLE MANAGED BLOCK proxy +EOF + fi + else + INFO "proxy.sh NO http_proxy=$http_proxy https_proxy=$https_proxy" + grep -q "^check_certificate = on" /etc/wgetrc || \ + cat >> /etc/wgetrc << EOF +# BEGIN ANSIBLE MANAGED BLOCK proxy +check_certificate = on +quiet = on +# END ANSIBLE MANAGED BLOCK proxy +EOF + fi +exit 0 diff --git a/overlay/Linux/usr/local/sbin/bootstrap_wheels.bash b/overlay/Linux/usr/local/sbin/bootstrap_wheels.bash new file mode 100755 index 0000000..f33028f --- /dev/null +++ b/overlay/Linux/usr/local/sbin/bootstrap_wheels.bash @@ -0,0 +1,11 @@ +#!/bin/bash +# -*-mode: sh; tab-width: 8; coding: utf-8-unix -*- +set -e + +ROLE=base +WD=$PWD +cd tmp +exit 0 + +[ -d wheels ] || mkdir wheels +cd wheels diff --git a/overlay/Linux/usr/local/sbin/box_clean_empty.bash b/overlay/Linux/usr/local/sbin/box_clean_empty.bash new file mode 100755 index 0000000..f01d0b7 --- /dev/null +++ b/overlay/Linux/usr/local/sbin/box_clean_empty.bash @@ -0,0 +1,62 @@ +#!/bin/sh +# -*- mode: sh; tab-width: 8; coding: utf-8-unix -*- + +# in box, unix generic, builder generic +ROLE=hostvms +PREFIX=/var/local +prog=$( basename $0 .bash ) +. /usr/local/bin/usr_local_tput.bash + +if [ -d /etc/portage ] ; then + # maybe we should delete ALL the package.use and package.mask? + # we did most of them as workarounds or to set the distfiles.zip + rm -f "/etc/portage/package.use/grub.sh" + fi + +### CLEANUP TO SHRINK THE BOX ### + +# a fresh install probably shouldn't nag about news +# chroot "" /usr/bin/eselect news read all > /dev/null 2>&1 + +INFO "delete in /tmp and /var/tmp" +rm -rf /tmp/* +rm -rf /var/tmp/* + +# there's some leftover junk by gem installation in the root folder +# don't know where this is from (/root/.gem/specs/rubygems.org%80/...), but it should go... +# we use a global ruby by default +# ...probably hard coded path by mistake, report to upstream? Which upstream?!? +[ -d /root/.gem ] && rm -rf /root/.gem + +INFO "cleaning kernel" +ls -l /usr/src 2>/dev/null && \ + for elt in /usr/src/linux-*/ ; do + [ -f .config ] || continue + INFO "kernel make clean in $elt" + [ -d "$elt" ] || continue + ( cd "$elt" && make clean ) + done + +[ -f /root/bin/packer_clean_distfiles.bash ] && sh /root/bin/packer_clean_distfiles.bash + +INFO "fill all free hdd space with zeros" +if df | grep /boot$ ; then + dd if=/dev/zero of="/boot/EMPTY" bs=1M 2>/dev/null + rm "/boot/EMPTY" + sync +fi + +dd if=/dev/zero of="/EMPTY" bs=1M 2>/dev/null +rm "/EMPTY" +sync + +INFO "fill all swap space with zeros and recreate swap" +cat /proc/swaps |grep ^/ | cut -f 1 -d ' '| while read dev ; do + swapoff $dev || continue + shred -n 0 -z $dev + # FixMe: label? 
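+ # Untested sketch for the FixMe above: the old label could be captured with
+ # label=$( blkid -s LABEL -o value $dev ) before the shred call wipes the
+ # swap signature, then restored here with: mkswap ${label:+-L "$label"} $dev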
+ mkswap $dev + sync +done + +exit 0 diff --git a/overlay/Linux/usr/local/sbin/box_gentoo_emerge.bash b/overlay/Linux/usr/local/sbin/box_gentoo_emerge.bash new file mode 100755 index 0000000..9256c81 --- /dev/null +++ b/overlay/Linux/usr/local/sbin/box_gentoo_emerge.bash @@ -0,0 +1,51 @@ +#!/bin/bash +# -*- mode: sh; fill-column: 75; tab-width: 8; coding: utf-8-unix -*- + +prog=$( basename $0 .bash ) +ROLE=base +LOG_DIR=/usr/local/var/logs/portage +[ -d $LOG_DIR ] || mkdir -p $LOG_DIR + +declare -a ARGS +if [ "$#" -eq 1 ] ; then + ARGS=( "$1" ) + LOG=$( basename $1 ).log + elif [ "$#" -eq 0 ] ; then + ARGS="@world" + LOG=world.log + elif false && [ -f world.lib ] ; then # ? + ARGS="$( grep -v '^#' world.lib )" + LOG=world.log + else + ARGS=("$@") + LOG=world.log + fi + +if mount | grep -q ' on /mnt/tmp' ; then + export TMPDIR=/mnt/tmp +# else +# echo "WARN: /mnt/tmp not mounted" + fi + +# --changed-deps --deep --update +LARGS="-vb --changed-use --with-bdeps=y --changed-deps-report" +LARGS="$LARGS --backtrack=30 --ignore-built-slot-operator-deps=y --keep-going" + +# Skips the packages specified on the command-line that have already been installed. +LARGS="$LARGS --noreplace" + +# LARGS="$LARGS --exclude " +LOG=$LOG_DIR/$LOG +export PYTHONPATH= +echo INFO: $LARGS $ARGS >> $LOG 2>&1 +nice python$BASE_PYTHON3_MINOR $( which emerge ) $LARGS $ARGS >> $LOG 2>&1 +[ $? -ne 0 ] && exit $? +if grep ImportError $LOG ; then + echo ERROR: ImportError $ARGS && exit 10 +elif grep ParseError $LOG ; then + echo ERROR: ParseError $ARGS && exit 11 +elif grep 'Your current profile is invalid' $LOG ; then + echo ERROR: Your current profile is invalid $ARGS && exit 12 +fi + +exit 0 diff --git a/overlay/Linux/usr/local/sbin/debian_cache_to_archives.bash b/overlay/Linux/usr/local/sbin/debian_cache_to_archives.bash new file mode 100755 index 0000000..2f99c75 --- /dev/null +++ b/overlay/Linux/usr/local/sbin/debian_cache_to_archives.bash @@ -0,0 +1,26 @@ +#!/bin/sh +# -*-mode: sh; tab-width: 8; coding: utf-8-unix -*- +# filter + +ROLE=base + +[ -z "$CACHE" ] && CACHE=/mnt/o/Cache/Apt/Debian/10.6 +[ -d "$CACHE" ] || exit 1$? + +[ -d /etc/apt ] || exit 0 + +cd $CACHE || exit 2 + +[ -d var/cache/apt/archives ] || mkdir -p var/cache/apt/archives + +find *.deb -type f -name \*.deb | while read file; do + base=$( basename $file ) + [ ! -d /var/cache/apt/archives/ ] || \ + [ -e /var/cache/apt/archives/$base ] || ln -s $PWD/$file /var/cache/apt/archives/$base + [ -f var/cache/apt/archives/$base -a ! 
-h var/cache/apt/archives/$base ] && rm var/cache/apt/archives/$base + [ -e var/cache/apt/archives/$base ] || ln -s $PWD/$file var/cache/apt/archives/$base + done + + +exit 0 + diff --git a/overlay/Linux/usr/local/sbin/debian_elts_to_uris.bash b/overlay/Linux/usr/local/sbin/debian_elts_to_uris.bash new file mode 100755 index 0000000..c340c7c --- /dev/null +++ b/overlay/Linux/usr/local/sbin/debian_elts_to_uris.bash @@ -0,0 +1,13 @@ +#!/bin/sh +# -*-mode: sh; tab-width: 8; coding: utf-8-unix -*- + +ROLE=base +[ "$#" -eq 0 ] && set -- *.elts + +for elt in "$@" ; do + base=$( basename $elt .elts ) + [ -f $base.uris ] && continue + apt-get install --print-uris $( cat $elt ) > $base.uris 2>$base.errs + done + +exit 0 diff --git a/overlay/Linux/usr/local/sbin/debian_uris_to_urls.bash b/overlay/Linux/usr/local/sbin/debian_uris_to_urls.bash new file mode 100755 index 0000000..ebf5028 --- /dev/null +++ b/overlay/Linux/usr/local/sbin/debian_uris_to_urls.bash @@ -0,0 +1,31 @@ +#!/bin/sh +# -*-mode: sh; tab-width: 8; coding: utf-8-unix -*- +# filter or .uris + +ROLE=base +[ -z "$CACHE" ] && CACHE=/mnt/o/Cache/Apt/Debian/10.6 +[ -d "$CACHE" ] || mkdir $CACHE # || exit 1$? + +# debian --print-uris +if [ $? -eq 0 ] ; then + # filter +grep 'https*://' | \ + sed -e 's@ftp://[^ ]*@@g' -e 's@.*https*://@https://@g' -e "s@'.*@@g" | \ + while read line ; do + for url in $line ; do + base=`basename "$url"` + pre=`sed -e "s@https*://@${CACHE}@" <<< $url` + [ -e $pre ] && break + echo $line + break + done + done + fi +for elt in "$@" ; do + base=$( basename $elt .elts ) + [ -s $base.urls ] && continue + sh $0 < $elt > $base.urls + [ -s $base.urls ] || rm $base.urls + done + +exit 0 diff --git a/overlay/Linux/usr/local/sbin/gentoo_scurl_urls.sh b/overlay/Linux/usr/local/sbin/gentoo_scurl_urls.sh new file mode 100755 index 0000000..4511185 --- /dev/null +++ b/overlay/Linux/usr/local/sbin/gentoo_scurl_urls.sh @@ -0,0 +1,70 @@ +#!/bin/bash +# filter - arguments are to wget - quoted? + +prog=$( basename $0 .bash ) +prog=ScurlU +ROOTDIR=/mnt/i/net/Http +ROLE=base +CACHE=/usr/portage/distfiles + +. /usr/local/bin/proxy_curl_lib.bash + +route | grep -q ^def || { echo ERROR: not connected ; exit 1 ; } + +. 
/usr/local/bin/usr_local_tput.bash +FETCHCOMMAND='/usr/local/bin/scurl.bash --force-directories --directory-prefix "\${DISTDIR}" -- "\${URI}"' + +# RARGS="--retry 1 --connect-timeout 10" +if [ "$#" -eq 0 ] ; then + LARGS="--force-directories --directory-prefix $ROOTDIR" + else + LARGS="$@" +fi +cp /dev/null /tmp/$prog$$.urls + +# //www.simplesystems.org/users/bfriesen/public-key.txt no https: +# https://opencoder.net/WayneDavison.key cloudflare 403 +# https://www.simplesystems.org/users/bfriesen/public-key.txt 503 +# https://tiswww.case.edu/php/chet/gpgkey.asc 500 timeout +# https://botan.randombit.net/pgpkey.txt no tls1.3 +# https://sourceware.org/elfutils/ftp/gpgkey-1AA44BE649DE760A.gpg no tls1.3 +# https://gnutls.org/gnutls-release-keyring.gpg no tls1.3 + +retval=0 +# NOT 1.3 -e 's@^https://distfiles.gentoo.org/distfiles/[^ ]* https://pypi.python.org/@https://pypi.python.org/@' +grep ^http | \ + sed -e 's@ftp://[^ ]*@@' \ + -e 's/http:/https:/' \ + -e 's@^https://distfiles.gentoo.org/distfiles/openpgp-keys-[^ ]*.asc @@' \ + -e 's@https*://distfiles.gentoo.org@https://gentoo.osuosl.org@g' \ + -e 's@https://gentoo.osuosl.org@https://mirror.leaseweb.com/gentoo@g' \ + -e 's@https*://download.sourceforge.net@https://download.sourceforge.net@g' | \ + while read urls ; do + url=`echo $urls|sed -e 's@ .*@@'` + base=`basename "$url"` + [ -e $CACHE/$base ] && echo $CACHE/$base && continue + base=`echo $url | sed -e 's@ .*@@' -e 's@https*://@@'` + [ -e $ROOTDIR/"$base" ] && echo $ROOTDIR/"$base" && continue + for url in $urls ; do + for no in "${NOTLSV3[@]}" ; do + [[ $url =~ $no ]] && continue + done + domain=`sed -e 's@/.*@@' <<< $base` + ip=`tor-resolve $domain` + if [ $? -eq 0 -a -n "$ip" ] ; then + a=`proxy_ami_cloudflared $ip` + [ $? -eq 0 -a "$a" = True ] && \ + WARN $url Cloudflared $ip $no && \ + continue + fi + + DBUG $prog /usr/local/bin/scurl.bash $LARGS -- $RARGS $url + /usr/local/bin/scurl.bash $LARGS -- $RARGS $url || { + retval=$? + continue + } + break + done + done + +exit $retval diff --git a/overlay/Linux/usr/local/sbin/gentoo_sec-keys_overlay.bash b/overlay/Linux/usr/local/sbin/gentoo_sec-keys_overlay.bash new file mode 100755 index 0000000..947b5a6 --- /dev/null +++ b/overlay/Linux/usr/local/sbin/gentoo_sec-keys_overlay.bash @@ -0,0 +1,62 @@ +#!/bin/bash +# -*- mode: sh; fill-column: 75; tab-width: 8; coding: utf-8-unix -*- + +shopt -s nullglob || exit 1 + +prog=`basename $0 .bash` +ROLE=base +export PATH=/sbin:$PATH + +PREFIX=/usr/local +. 
/usr/local/bin/usr_local_tput.bash || exit 2 + +DEST=$PREFIX/portage/testforge/sec-keys +FROM=/usr/portage/sec-keys + +cd / +grep /~sam/ /usr/portage/sec-keys/*/*d| \ + sed -e 's@.*/@@' -e 's/"//' -e 's/.*-//'|grep -v P | \ + while read f;do + b=`ls /usr/portage/distfiles/*"$f"`|| continue; + a=`readlink "$b"`; + echo $a;[ -h "$a" ] && continue; + echo $b; + done | \ +sed -e 's@\.\./\.\.@/i@'|zip -m9 --symlinks sam.zip -@ + +cd $FROM +# +tar cf - *-* | tar xf - --keep-newer-files -C $DEST 2>/dev/null >/dev/null + +[ -d $DEST ] || mkdir -p $DEST +cd $FROM +i=0 +for dir in *-*; do + [ -d $dir ] || continue + [ -d $DEST/$dir ] || mkdir $DEST/$dir + ls $dir/*ebuild >/dev/null 2>/dev/null || { WARN no *ebuild in $dir ; continue ; } + for file in $dir/*ebuild ; do + [ -f $DEST/$file ] && [ $DEST/$file -nt $FROM/$file ] && continue + sed -e 's/^LICENSE=/RESTRICT="mirror"\nLICENSE=/' > $DEST/$file < $FROM/$file + if grep -q 'Mirrored from ' $FROM/$file ; then + url="`grep 'Mirrored from ' $FROM/$file|sed -e 's/.*Mirrored from //' -e 's/ .*//'`" + if [ -n "$url" ] ; then + i=`expr $i + 1` + rep=`sed -e 's/[$]/\\\\$/g' -e 's/[&]/\\\\&/g' <<< $url` + # could change some keyservers here + rep=`sed -e 's/http:/https:/' <<< $rep` + DBUG rep="$rep" + sed -e "s@https://dev.gentoo.org/.sam/[^ \"]*@$rep@" \ + -i $DEST/$file + fi + fi + cd $DEST/$dir + for dfile in $dir/*ebuild ; do + ddir=`dirname $dfile` + cd $ddir + ebuild manifest *ebuild + done + cd $DEST + done +done +INFO $i $DEST diff --git a/overlay/Linux/usr/local/share/ansible/library/Makefile b/overlay/Linux/usr/local/share/ansible/library/Makefile new file mode 100644 index 0000000..01da6f3 --- /dev/null +++ b/overlay/Linux/usr/local/share/ansible/library/Makefile @@ -0,0 +1,90 @@ +# -*- mode: Makefile; tab-width: 8; coding: utf-8-unix -*- + +PENV=env http_proxy=http://127.0.0.1:3128 \ + https_proxy=http://127.0.0.1:3128 \ + no_proxy="localhost,127.0.0.1" +NENV=env no_proxy=localhost,127.0.0.1 \ + https_proxy= socks_proxy= http_proxy= \ + +default:: test + +.SUFFIXES: .yml .json + +.yml.json: + yaml2json.bash < $< > $@ + +test:: test_test test_net test_local +test_net:: + $(MAKE) $(MFLAGS) TARGET=ipleak.net PDB="" ENV="$(PENV)" targets cleanup + $(MAKE) $(MFLAGS) TARGET=python.org PDB="" ENV="$(PENV)" targets cleanup +test_local:: + $(MAKE) $(MFLAGS) TARGET=polipo PDB="" ENV="" targets cleanup +test_test:: + which firefox || exit 0 + $(NENV) /var/local/bin/python2.bash selenium_test.py test + $(MAKE) $(MFLAGS) cleanup +test_keepassxc:: + [ ! -f /home/devuan/Passwords.kdbx ] || \ + ( cd .. 
; \ + ANSIBLE_KEEPASSXC_PASSWORD=foobar ansible -i hosts.yml \ + -c local -m ansible-keepassxc \ + -a 'database=/home/devuan/Passwords.kdbx entry=test_h@creep.im group=/Root/Xmpp/Chat' \ + localhost ) + +debug:: tests/selenium_test-python.org-firefox.json + $(PENV) /var/local/bin/python2.bash -m pdb selenium_test.py test + $(PENV) /var/local/bin/python2.bash -m pdb selenium_test.py $< + +cleanup:: + @ps ax | grep -v grep | grep geckodriver && killall geckodriver /usr/bin/geckodriver 2>/dev/null || true + @ps ax | grep -v grep | grep /firefox && killall /usr/lib64/firefox/firefox 2>/dev/null || true + @ps ax | grep -v grep | grep phantomjs && killall phantomjs 2>/dev/null || true + +targets:: +# [ -z "${DISPLAY}" ] && xvfb-run $(MAKE) $(MFLAGS) TARGET=$(TARGET) ENV="" /tmp/firefox-$(TARGET).log \ +# || $(MAKE) $(MFLAGS) TARGET=$(TARGET) ENV="$(ENV)" /tmp/firefox-$(TARGET).log + $(MAKE) $(MFLAGS) TARGET=$(TARGET) ENV="$(ENV) MOZ_HEADLESS=1" /tmp/firefox-$(TARGET).log + $(MAKE) $(MFLAGS) TARGET=$(TARGET) ENV="$(ENV)" /tmp/phantomjs-$(TARGET).log + +/tmp/chromium-$(TARGET).log:: + +/tmp/firefox-$(TARGET).log:: tests/selenium_test-$(TARGET)-firefox.json clean + rm -f /tmp/test_ipleak.net_firefox_* + $(ENV) /var/local/bin/python2.bash $(PDB) selenium_test.py tests/selenium_test-$(TARGET)-firefox.json > \ + /tmp/firefox-$(TARGET).out \ + && jq .results < /tmp/firefox-$(TARGET).out > /tmp/firefox-$(TARGET).json \ + || echo ERROR: $@ + [ ! -f /tmp/firefox-$(TARGET).log ] || cat /tmp/firefox-$(TARGET).log + [ ! -f /tmp/geckodriver-$(TARGET).log ] || cat /tmp/geckodriver-$(TARGET).log + [ ! -f /tmp/test_$(TARGET)_failed.png ] || fbi /tmp/test_$(TARGET)_failed.png + +/tmp/waterfox-$(TARGET).log:: tests/selenium_test-$(TARGET)-waterfox.json clean + rm -f /tmp/test_ipleak.net_waterfox_* + $(ENV) /var/local/bin/python2.bash $(PDB) selenium_test.py tests/selenium_test-$(TARGET)-waterfox.json > \ + /tmp/waterfox-$(TARGET).out \ + && jq .results < /tmp/waterfox-$(TARGET).out > /tmp/waterfox-$(TARGET).out \ + || echo ERROR: $@ + [ ! -f /tmp/waterfox-$(TARGET).log ] || cat /tmp/waterfox-$(TARGET).log + [ ! -f /tmp/geckodriver-$(TARGET).log ] || cat /tmp/geckodriver-$(TARGET).log + [ ! -f /tmp/test_$(TARGET)_failed.png ] || fbi /tmp/test_$(TARGET)_failed.png + +/tmp/phantomjs-$(TARGET).log:: tests/selenium_test-$(TARGET)-phantomjs.json clean + rm -f /tmp/test_ipleak.net_phantomjs_* + $(ENV) /var/local/bin/python2.bash $(PDB) selenium_test.py tests/selenium_test-$(TARGET)-phantomjs.json > \ + /tmp/phantomjs-$(TARGET).out \ + && jq .results < /tmp/phantomjs-$(TARGET).out > /tmp/phantomjs-$(TARGET).json \ + || echo ERROR: $@ + [ ! -f /tmp/phantomjs-$(TARGET).log ] || cat /tmp/phantomjs-$(TARGET).log + [ ! -f /tmp/ghostdriver-$(TARGET).log ] || cat /tmp/ghostdriver-$(TARGET).log + [ ! 
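# The .yml.json suffix rule above shells out to an external yaml2json.bash
# helper; an equivalent self-contained GNU-make pattern rule (assumption:
# python3 with PyYAML is on the PATH) would be:
%.json: %.yml
	python3 -c 'import json,sys,yaml; json.dump(yaml.safe_load(sys.stdin), sys.stdout)' < $< > $@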
-f /tmp/test_$(TARGET)_failed.png ] || fbi /tmp/test_$(TARGET)_failed.png + @ps ax | grep -v grep | grep phantomjs && killall phantomjs 2>/dev/null || true + +tests/selenium_test-$(TARGET)-phantomjs.json:: tests/selenium_test-$(TARGET)-firefox.json + sed -e 's/geckodriver/ghostdriver/' -e 's/firefox/phantomjs/' < $< > $@ + + +clean:: + @rm -f /tmp/firefox-$(TARGET).log /tmp/geckodriver-$(TARGET).log + @rm -f /tmp/phantomjs-$(TARGET).log /tmp/ghostdriver-$(TARGET).log + @rm -f /tmp/test_$(TARGET)_failed.png + @rm -f geckodriver.log ghostdriver.log diff --git a/overlay/Linux/usr/local/share/ansible/library/ansible-keepassxc b/overlay/Linux/usr/local/share/ansible/library/ansible-keepassxc new file mode 120000 index 0000000..147f1c5 --- /dev/null +++ b/overlay/Linux/usr/local/share/ansible/library/ansible-keepassxc @@ -0,0 +1 @@ +ansible-keepassxc.py \ No newline at end of file diff --git a/overlay/Linux/usr/local/share/ansible/library/ansible-keepassxc.py b/overlay/Linux/usr/local/share/ansible/library/ansible-keepassxc.py new file mode 100755 index 0000000..79fbe6d --- /dev/null +++ b/overlay/Linux/usr/local/share/ansible/library/ansible-keepassxc.py @@ -0,0 +1,190 @@ +#!/usr/bin/python + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import traceback +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + +IMPORT_ERR = None +try: +# import _argon2_xffi_bindings + import pykeepass as keepass +except ImportError: + IMPORT_ERR = traceback.format_exc() + +DOCUMENTATION = r''' +--- +module: ansible-keepassxc + +short_description: Module to read credentials from KeePassXC + +version_added: "0.0.1" + +description: Module to read credentials from KeePassXC + +options: + database: + description: Path to database file + required: true + type: str + password: + description: Database Password + required: true + type: str + keyfile: + description: Path to key file + required: false + type: str + entry: + description: Entry name for the attribute to fetch + required: true + type: str + group: + decription: Group name that the Entry belongs to + required: false + type: str + +author: + - Jeremy Lumley (@jlumley) +''' + +EXAMPLES = r''' +# Fetch the credentials for the server_1 entry in any group +- name: Fetch server_1 credentials + jlumley.jlumley.ansible-keepassxc: + database: "/secrets/db.kdbx" + password: "s3cure_p4550rd" + entry: "server_1" + +# Fetch the reddit entry in the social group +- name: Fetching reddit credentials + jlumley.jlumley.ansible-keepassxc: + database: "/secrets/db.kdbx" + password: "sup3r_s3cure_p4550rd" + entry: "reddit" + group: "social" + +# Fetch a custom strig attribute from the github entry +- name: Fetch Github API Token + jlumley.jlumley.ansible-keepassxc: + database: "/secrets/db.kdbx" + password: "d0pe_s3cure_p4550rd" + keyfile: "/secrets/top_secret_key" + entry: "github" + group: "development" + +''' + +RETURN = r''' +# Return values +username: + description: Username of entry if present + type: str + returned: always + sample: 's3cr3t_us3r' +password: + description: Password of entry if present + type: str + returned: always + sample: 's3cr3t_p455word' +url: + description: Url of entry if present + type: str + returned: always + sample: 'http://reddit.com' +custom_fields: + description: dictionary containing all custom fields + type: dict + returned: always + sample: False +no_log: + description: 
suppress logging of password + type: bool + returned: never + sample: False +''' + +def run_module(): + # define available arguments/parameters a user can pass to the module + module_args = dict( + database = dict(type='str', required=True), + password = dict(type='str', required=False, + default=os.environ.get('ANSIBLE_KEEPASSXC_PASSWORD')), + keyfile = dict(type='str', required=False, default=None), + entry = dict(type='str', required=True), + group = dict(type='str', required=False), + no_log = dict(type='bool', required=False, default=False), + ) + + # seed the result dict in the object + result = dict( + changed=False, + username='', + password='', + url='', + custom_fields={} + ) + + # Currently no support for a check_mode this maybe added later if + # functionality to modify the database is added later + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=False, + no_log=False + ) + + if IMPORT_ERR: + module.fail_json( + msg=missing_required_lib("pykeepass"), + exception=IMPORT_ERR + ) + + # unlock local keepass database + try: + kp = keepass.PyKeePass( + module.params['database'], + password=module.params['password'], + keyfile=module.params['keyfile']) + except keepass.exceptions.CredentialsError: + module.fail_json(msg='Invalid Credentials') + + # find entry + entry = kp.find_entries( + title=module.params['entry'], + group=module.params['group'] + ) + + # fail is entry is not present + if not entry: + module.fail_json(msg=f"Unable to find entry: {module.params['entry']}") + + else: + entry = entry[0] + custom_field_keys = entry._get_string_field_keys(exclude_reserved=True) + custom_fields = dict() + for key in custom_field_keys: + custom_fields[key] = entry.get_custom_property(key) + result = dict ( + changed=False, + username=entry.username, + password=entry.password, + url=entry.url, + custom_fields=custom_fields + ) + + # in the event of a successful module execution, you will want to + # simple AnsibleModule.exit_json(), passing the key/value results + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() + + diff --git a/overlay/Linux/usr/local/share/ansible/library/ansible-keepassxc.py.diff b/overlay/Linux/usr/local/share/ansible/library/ansible-keepassxc.py.diff new file mode 100644 index 0000000..e99f43c --- /dev/null +++ b/overlay/Linux/usr/local/share/ansible/library/ansible-keepassxc.py.diff @@ -0,0 +1,69 @@ +*** ansible-keepassxc.py.dst 2022-04-07 15:14:39.222017589 +0000 +--- ansible-keepassxc.py 2022-04-07 15:12:28.010013156 +0000 +*************** +*** 2,11 **** +--- 2,22 ---- + + # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + from __future__ import (absolute_import, division, print_function) + __metaclass__ = type + ++ import os ++ import traceback ++ from ansible.module_utils.basic import AnsibleModule, missing_required_lib ++ ++ IMPORT_ERR = None ++ try: ++ import _argon2_xffi_bindings ++ import pykeepass as keepass ++ except ImportError: ++ IMPORT_ERR = traceback.format_exc() ++ + DOCUMENTATION = r''' + --- + module: ansible-keepassxc + + short_description: Module to read credentials from KeePassXC +*************** +*** 89,112 **** + type: dict + returned: always + sample: False + ''' + +- from ansible.module_utils.basic import AnsibleModule, missing_required_lib +- import traceback +- +- IMPORT_ERR = None +- try: +- import pykeepass as keepass +- except ImportError: +- IMPORT_ERR = traceback.format_exc() +- + def run_module(): + # define 
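# Under the hood the module above is a thin wrapper around pykeepass; a
# minimal standalone sketch of the same lookup (database path, password and
# entry title are illustrative only):
import pykeepass

kp = pykeepass.PyKeePass('/secrets/db.kdbx', password='s3cure_p4550rd')
entry = kp.find_entries(title='server_1', first=True)
if entry is not None:
    # same fields the module returns: username, password, url
    print(entry.username, entry.password, entry.url)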
available arguments/parameters a user can pass to the module + module_args = dict( + database = dict(type='str', required=True), +! password = dict(type='str', required=True), + keyfile = dict(type='str', required=False, default=None), + entry = dict(type='str', required=True), + group = dict(type='str', required=False), + ) + +--- 100,115 ---- + type: dict + returned: always + sample: False + ''' + + def run_module(): + # define available arguments/parameters a user can pass to the module + module_args = dict( + database = dict(type='str', required=True), +! password = dict(type='str', required=False, +! default=os.environ.get('ANSIBLE_KEEPASSXC_PASSWORD')), + keyfile = dict(type='str', required=False, default=None), + entry = dict(type='str', required=True), + group = dict(type='str', required=False), + ) + diff --git a/overlay/Linux/usr/local/share/ansible/library/ansible-keepassxc.py.dst b/overlay/Linux/usr/local/share/ansible/library/ansible-keepassxc.py.dst new file mode 100644 index 0000000..380b882 --- /dev/null +++ b/overlay/Linux/usr/local/share/ansible/library/ansible-keepassxc.py.dst @@ -0,0 +1,180 @@ +#!/usr/bin/python + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ansible-keepassxc + +short_description: Module to read credentials from KeePassXC + +version_added: "0.0.1" + +description: Module to read credentials from KeePassXC + +options: + database: + description: Path to database file + required: true + type: str + password: + description: Database Password + required: true + type: str + keyfile: + description: Path to key file + required: false + type: str + entry: + description: Entry name for the attribute to fetch + required: true + type: str + group: + decription: Group name that the Entry belongs to + required: false + type: str + +author: + - Jeremy Lumley (@jlumley) +''' + +EXAMPLES = r''' +# Fetch the credentials for the server_1 entry in any group +- name: Fetch server_1 credentials + jlumley.jlumley.ansible-keepassxc: + database: "/secrets/db.kdbx" + password: "s3cure_p4550rd" + entry: "server_1" + +# Fetch the reddit entry in the social group +- name: Fetching reddit credentials + jlumley.jlumley.ansible-keepassxc: + database: "/secrets/db.kdbx" + password: "sup3r_s3cure_p4550rd" + entry: "reddit" + group: "social" + +# Fetch a custom strig attribute from the github entry +- name: Fetch Github API Token + jlumley.jlumley.ansible-keepassxc: + database: "/secrets/db.kdbx" + password: "d0pe_s3cure_p4550rd" + keyfile: "/secrets/top_secret_key" + entry: "github" + group: "development" + +''' + +RETURN = r''' +# Return values +username: + description: Username of entry if present + type: str + returned: always + sample: 's3cr3t_us3r' +password: + description: Password of entry if present + type: str + returned: always + sample: 's3cr3t_p455word' +url: + description: Url of entry if present + type: str + returned: always + sample: 'http://reddit.com' +custom_fields: + description: dictionary containing all custom fields + type: dict + returned: always + sample: False +''' + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +import traceback + +IMPORT_ERR = None +try: + import pykeepass as keepass +except ImportError: + IMPORT_ERR = traceback.format_exc() + +def run_module(): + # define available arguments/parameters a user can pass to the module + module_args = dict( + 
database = dict(type='str', required=True), + password = dict(type='str', required=True), + keyfile = dict(type='str', required=False, default=None), + entry = dict(type='str', required=True), + group = dict(type='str', required=False), + ) + + # seed the result dict in the object + result = dict( + changed=False, + username='', + password='', + url='', + custom_fields={} + ) + + # Currently no support for a check_mode this maybe added later if + # functionality to modify the database is added later + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=False + ) + + if IMPORT_ERR: + module.fail_json( + msg=missing_required_lib("pykeepass"), + exception=IMPORT_ERR + ) + + # unlock local keepass database + try: + kp = keepass.PyKeePass( + module.params['database'], + password=module.params['password'], + keyfile=module.params['keyfile']) + except keepass.exceptions.CredentialsError: + module.fail_json(msg='Invalid Credentials') + + # find entry + entry = kp.find_entries( + title=module.params['entry'], + group=module.params['group'] + ) + + # fail is entry is not present + if not entry: + module.fail_json(msg=f"Unable to find entry: {module.params['entry']}") + + else: + entry = entry[0] + custom_field_keys = entry._get_string_field_keys(exclude_reserved=True) + custom_fields = dict() + for key in custom_field_keys: + custom_fields[key] = entry.get_custom_property(key) + result = dict ( + changed=False, + username=entry.username, + password=entry.password, + url=entry.url, + custom_fields=custom_fields + ) + + # in the event of a successful module execution, you will want to + # simple AnsibleModule.exit_json(), passing the key/value results + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() + + diff --git a/overlay/Linux/usr/local/share/ansible/library/ansible-keepassxc.urls b/overlay/Linux/usr/local/share/ansible/library/ansible-keepassxc.urls new file mode 100644 index 0000000..b86b279 --- /dev/null +++ b/overlay/Linux/usr/local/share/ansible/library/ansible-keepassxc.urls @@ -0,0 +1 @@ +https://github.com/jlumley/ansible-keepassxc/raw/main/keepassxc.py diff --git a/overlay/Linux/usr/local/share/ansible/library/eselect b/overlay/Linux/usr/local/share/ansible/library/eselect new file mode 100644 index 0000000..e1fb714 --- /dev/null +++ b/overlay/Linux/usr/local/share/ansible/library/eselect @@ -0,0 +1,93 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Jakub Jirutka +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: eselect +author: Jakub Jirutka +version_added: "unknown" +short_description: Module for Gentoo's eselect +description: + - Module for Gentoo's multi-purpose configuration and management tool eselect. +options: + module: + description: + - Name of the eselect module to run. + required: true + action: + description: + - Action of the eselect module to run. 
+ default: set + options: + description: + - An optional options for the eselect module (space separated). + required: false + aliases: [value, target] +''' + +EXAMPLES = ''' + - eselect: module=editor target=/usr/bin/vim + + - eselect: module=postgresql action=reset +''' + + +def run_eselect(module, *args): + cmd = 'eselect --brief --colour=no %s' % ' '.join(args) + rc, out, err = module.run_command(cmd) + if rc != 0: + module.fail_json(cmd=cmd, rc=rc, stdout=out, stderr=err, + msg='eselect failed') + else: + return out + + +def action_set(module, emodule, target): + current = run_eselect(module, emodule, 'show').strip() + if target != current: + run_eselect(module, emodule, 'set', target) + return True + else: + return False + + +def main(): + module = AnsibleModule( + argument_spec={ + 'module': {'required': True}, + 'action': {'default': 'set'}, + 'options': {'aliases': ['value', 'target'], 'default': ''} + } + ) + + emodule, action, options = (module.params[key] for key in ['module', 'action', 'options']) + changed = True + msg = '' + + if action == 'set': + changed = action_set(module, emodule, options) + else: + msg = run_eselect(module, emodule, action, options) + + module.exit_json(changed=changed, msg=msg) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/overlay/Linux/usr/local/share/ansible/library/geckodriver.log b/overlay/Linux/usr/local/share/ansible/library/geckodriver.log new file mode 100644 index 0000000..9f5b5ea --- /dev/null +++ b/overlay/Linux/usr/local/share/ansible/library/geckodriver.log @@ -0,0 +1,4 @@ +1649339287010 geckodriver INFO geckodriver 0.20.0 +1649339287356 geckodriver INFO Listening on 127.0.0.1:59175 +1649339399259 geckodriver INFO geckodriver 0.20.0 +1649339399263 geckodriver INFO Listening on 127.0.0.1:15991 diff --git a/overlay/Linux/usr/local/share/ansible/library/keepassxc.py b/overlay/Linux/usr/local/share/ansible/library/keepassxc.py new file mode 100644 index 0000000..43ed610 --- /dev/null +++ b/overlay/Linux/usr/local/share/ansible/library/keepassxc.py @@ -0,0 +1,180 @@ +#!/usr/bin/python + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ansible-keepassxc + +short_description: Module to read credentials from KeePassXC + +version_added: "0.0.1" + +description: Module to read credentials from KeePassXC + +options: + database: + description: Path to database file + required: true + type: str + password: + description: Database Password + required: true + type: str + keyfile: + description: Path to key file + required: false + type: str + entry: + description: Entry name for the attribute to fetch + required: true + type: str + group: + decription: Group name that the Entry belongs to + required: false + type: str + +author: + - Jeremy Lumley (@jlumley) +''' + +EXAMPLES = r''' +# Fetch the credentials for the server_1 entry in any group +- name: Fetch server_1 credentials + jlumley.jlumley.ansible-keepassxc: + database: "/secrets/db.kdbx" + password: "s3cure_p4550rd" + entry: "server_1" + +# Fetch the reddit entry in the social group +- name: Fetching reddit credentials + jlumley.jlumley.ansible-keepassxc: + database: "/secrets/db.kdbx" + password: "sup3r_s3cure_p4550rd" + entry: "reddit" + group: "social" + +# Fetch a custom strig attribute from the github entry +- name: Fetch Github API Token + jlumley.jlumley.ansible-keepassxc: + 
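# The eselect module above only calls "set" when "show" reports a different
# target, which is what makes it idempotent; done by hand in a shell, that
# check looks like this (module and target values are illustrative):
current=$(eselect --brief --colour=no editor show | tr -d '[:space:]')
[ "$current" = "/usr/bin/vim" ] || eselect --brief --colour=no editor set /usr/bin/vim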
database: "/secrets/db.kdbx" + password: "d0pe_s3cure_p4550rd" + keyfile: "/secrets/top_secret_key" + entry: "github" + group: "development" + +''' + +RETURN = r''' +# Return values +username: + description: Username of entry if present + type: str + returned: always + sample: 's3cr3t_us3r' +password: + description: Password of entry if present + type: str + returned: always + sample: 's3cr3t_p455word' +url: + description: Url of entry if present + type: str + returned: always + sample: 'http://reddit.com' +custom_fields: + description: dictionary containing all custom fields + type: dict + returned: always + sample: False +''' + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +import traceback + +IMPORT_ERR = None +try: + import pykeepass as keepass +except ImportError: + IMPORT_ERR = traceback.format_exc() + +def run_module(): + # define available arguments/parameters a user can pass to the module + module_args = dict( + database = dict(type='str', required=True), + password = dict(type='str', required=True), + keyfile = dict(type='str', required=False, default=None), + entry = dict(type='str', required=True), + group = dict(type='str', required=False), + ) + + # seed the result dict in the object + result = dict( + changed=False, + username='', + password='', + url='', + custom_fields={} + ) + + # Currently no support for a check_mode this maybe added later if + # functionality to modify the database is added later + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=False + ) + + if IMPORT_ERR: + module.fail_json( + msg=missing_required_lib("pykeepass"), + exception=IMPORT_ERR + ) + + # unlock local keepass database + try: + kp = keepass.PyKeePass( + module.params['database'], + password=module.params['password'], + keyfile=module.params['keyfile']) + except keepass.exceptions.CredentialsError: + module.fail_json(msg='Invalid Credentials') + + # find entry + entry = kp.find_entries( + title=module.params['entry'], + group=module.params['group'] + ) + + # fail is entry is not present + if not entry: + module.fail_json(msg=f"Unable to find entry: {module.params['entry']}") + + else: + entry = entry[0] + custom_field_keys = entry._get_string_field_keys(exclude_reserved=True) + custom_fields = dict() + for key in custom_field_keys: + custom_fields[key] = entry.get_custom_property(key) + result = dict ( + changed=False, + username=entry.username, + password=entry.password, + url=entry.url, + custom_fields=custom_fields + ) + + # in the event of a successful module execution, you will want to + # simple AnsibleModule.exit_json(), passing the key/value results + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() + + diff --git a/overlay/Linux/usr/local/share/ansible/library/plugins/connection/libvirt_qemu.py b/overlay/Linux/usr/local/share/ansible/library/plugins/connection/libvirt_qemu.py new file mode 100644 index 0000000..d8a819c --- /dev/null +++ b/overlay/Linux/usr/local/share/ansible/library/plugins/connection/libvirt_qemu.py @@ -0,0 +1,339 @@ +# Based on local.py (c) 2012, Michael DeHaan +# Based on chroot.py (c) 2013, Maykel Moya +# (c) 2013, Michael Scherer +# (c) 2015, Toshio Kuratomi +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +import sys + +__metaclass__ = type + +DOCUMENTATION = """ + author: Jesse Pretorius + connection: 
community.libvirt.libvirt_qemu + short_description: Run tasks on libvirt/qemu virtual machines + description: + - Run commands or put/fetch files to libvirt/qemu virtual machines using the qemu agent API. + notes: + - Currently DOES NOT work with selinux set to enforcing in the VM. + - Requires the qemu-agent installed in the VM. + - Requires access to the qemu-ga commands guest-exec, guest-exec-status, guest-file-close, guest-file-open, guest-file-read, guest-file-write. + version_added: "2.10" + options: + remote_addr: + description: Virtual machine name + default: inventory_hostname + vars: + - name: ansible_host + executable: + description: Shell to use for execution inside container + default: /bin/sh + vars: + - name: ansible_executable + virt_uri: + description: libvirt URI to connect to to access the virtual machine + default: qemu:///system + vars: + - name: ansible_libvirt_uri +""" + +import base64 +import json +import libvirt +import libvirt_qemu +import shlex +import traceback + +from ansible import constants as C +from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.plugins.connection import ConnectionBase, BUFSIZE +from ansible.plugins.shell.powershell import _parse_clixml +from ansible.utils.display import Display +from functools import partial +from os.path import exists, getsize + +display = Display() + + +REQUIRED_CAPABILITIES = [ + {'enabled': True, 'name': 'guest-exec', 'success-response': True}, + {'enabled': True, 'name': 'guest-exec-status', 'success-response': True}, + {'enabled': True, 'name': 'guest-file-close', 'success-response': True}, + {'enabled': True, 'name': 'guest-file-open', 'success-response': True}, + {'enabled': True, 'name': 'guest-file-read', 'success-response': True}, + {'enabled': True, 'name': 'guest-file-write', 'success-response': True} +] + + +class Connection(ConnectionBase): + ''' Local libvirt qemu based connections ''' + + transport = 'community.libvirt.libvirt_qemu' + # TODO(odyssey4me): + # Figure out why pipelining does not work and fix it + has_pipelining = False + has_tty = False + + def __init__(self, play_context, new_stdin, *args, **kwargs): + super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) + + self._host = self._play_context.remote_addr + + # Windows operates differently from a POSIX connection/shell plugin, + # we need to set various properties to ensure SSH on Windows continues + # to work + if getattr(self._shell, "_IS_WINDOWS", False): + self.has_native_async = True + self.always_pipeline_modules = True + self.module_implementation_preferences = ('.ps1', '.exe', '') + self.allow_executable = False + + def _connect(self): + ''' connect to the virtual machine; nothing to do here ''' + super(Connection, self)._connect() + if not self._connected: + + self._virt_uri = self.get_option('virt_uri') + + self._display.vvv(u"CONNECT TO {0}".format(self._virt_uri), host=self._host) + try: + self.conn = libvirt.open(self._virt_uri) + except libvirt.libvirtError as err: + self._display.vv(u"ERROR: libvirtError CONNECT TO {0}\n{1}".format(self._virt_uri, to_native(err)), host=self._host) + self._connected = False + raise AnsibleConnectionFailure(to_native(err)) + + self._display.vvv(u"FIND DOMAIN {0}".format(self._host), host=self._host) + try: + self.domain = self.conn.lookupByName(self._host) + except libvirt.libvirtError as err: + raise AnsibleConnectionFailure(to_native(err)) + + request_cap = 
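# The connection plugin above is selected per host through ordinary inventory
# variables (remote_addr defaults to inventory_hostname, the URI comes from
# ansible_libvirt_uri); an illustrative INI inventory stanza, host name made up:
#
#   [vms]
#   testvm ansible_connection=community.libvirt.libvirt_qemu ansible_libvirt_uri=qemu:///system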
json.dumps({'execute': 'guest-info'}) + response_cap = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_cap, 5, 0)) + self.capabilities = response_cap['return']['supported_commands'] + self._display.vvvvv(u"GUEST CAPABILITIES: {0}".format(self.capabilities), host=self._host) + missing_caps = [] + for cap in REQUIRED_CAPABILITIES: + if cap not in self.capabilities: + missing_caps.append(cap['name']) + if len(missing_caps) > 0: + self._display.vvv(u"REQUIRED CAPABILITIES MISSING: {0}".format(missing_caps), host=self._host) + raise AnsibleConnectionFailure('Domain does not have required capabilities') + + display.vvv(u"ESTABLISH {0} CONNECTION".format(self.transport), host=self._host) + self._connected = True + + def exec_command(self, cmd, in_data=None, sudoable=True): + """ execute a command on the virtual machine host """ + super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) + + self._display.vvv(u"EXEC {0}".format(cmd), host=self._host) + + cmd_args_list = shlex.split(to_native(cmd, errors='surrogate_or_strict')) + + if getattr(self._shell, "_IS_WINDOWS", False): + # Become method 'runas' is done in the wrapper that is executed, + # need to disable sudoable so the bare_run is not waiting for a + # prompt that will not occur + sudoable = False + + # Generate powershell commands + cmd_args_list = self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False) + + # TODO(odyssey4me): + # Implement buffering much like the other connection plugins + # Implement 'env' for the environment settings + # Implement 'input-data' for whatever it might be useful for + request_exec = { + 'execute': 'guest-exec', + 'arguments': { + 'path': cmd_args_list[0], + 'capture-output': True, + 'arg': cmd_args_list[1:] + } + } + request_exec_json = json.dumps(request_exec) + + display.vvv(u"GA send: {0}".format(request_exec_json), host=self._host) + + # TODO(odyssey4me): + # Add timeout parameter + try: + result_exec = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_exec_json, 5, 0)) + except libvirt.libvirtError as err: + self._display.vv(u"ERROR: libvirtError EXEC TO {0}\n{1}".format(self._virt_uri, to_native(err)), host=self._host) + sys.stderr.write(u"ERROR: libvirtError EXEC TO {0}\n{1}\n".format(self._virt_uri, to_native(err))) + self._connected = False + raise AnsibleConnectionFailure(to_native(err)) + + display.vvv(u"GA return: {0}".format(result_exec), host=self._host) + + request_status = { + 'execute': 'guest-exec-status', + 'arguments': { + 'pid': result_exec['return']['pid'] + } + } + request_status_json = json.dumps(request_status) + + display.vvv(u"GA send: {0}".format(request_status_json), host=self._host) + + # TODO(odyssey4me): + # Work out a better way to wait until the command has exited + result_status = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_status_json, 5, 0)) + + display.vvv(u"GA return: {0}".format(result_status), host=self._host) + + while not result_status['return']['exited']: + result_status = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_status_json, 5, 0)) + + display.vvv(u"GA return: {0}".format(result_status), host=self._host) + + if result_status['return'].get('out-data'): + stdout = base64.b64decode(result_status['return']['out-data']) + else: + stdout = b'' + + if result_status['return'].get('err-data'): + stderr = base64.b64decode(result_status['return']['err-data']) + else: + stderr = b'' + + # Decode xml from windows + if getattr(self._shell, "_IS_WINDOWS", False) 
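# The exec path above is a plain qemu-guest-agent round trip: guest-exec starts
# the process, guest-exec-status is polled until it exits, and captured output
# comes back base64-encoded. A standalone sketch (domain name and command are
# illustrative; requires qemu-guest-agent inside the VM):
import base64
import json
import time

import libvirt
import libvirt_qemu

conn = libvirt.open('qemu:///system')
dom = conn.lookupByName('testvm')

exec_req = {'execute': 'guest-exec',
            'arguments': {'path': '/bin/uname', 'arg': ['-a'],
                          'capture-output': True}}
pid = json.loads(libvirt_qemu.qemuAgentCommand(
    dom, json.dumps(exec_req), 5, 0))['return']['pid']

status = {'return': {'exited': False}}
while not status['return']['exited']:
    time.sleep(0.2)
    status = json.loads(libvirt_qemu.qemuAgentCommand(
        dom, json.dumps({'execute': 'guest-exec-status',
                         'arguments': {'pid': pid}}), 5, 0))

print(base64.b64decode(status['return'].get('out-data', '')).decode())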
and stdout.startswith(b"#< CLIXML"): + stdout = _parse_clixml(stdout) + + display.vvv(u"GA stdout: {0}".format(to_text(stdout)), host=self._host) + display.vvv(u"GA stderr: {0}".format(to_text(stderr)), host=self._host) + + return result_status['return']['exitcode'], stdout, stderr + + def put_file(self, in_path, out_path): + ''' transfer a file from local to domain ''' + super(Connection, self).put_file(in_path, out_path) + display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._host) + + if not exists(to_bytes(in_path, errors='surrogate_or_strict')): + raise AnsibleFileNotFound( + "file or module does not exist: %s" % in_path) + + request_handle = { + 'execute': 'guest-file-open', + 'arguments': { + 'path': out_path, + 'mode': 'wb+' + } + } + request_handle_json = json.dumps(request_handle) + + display.vvv(u"GA send: {0}".format(request_handle_json), host=self._host) + + result_handle = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_handle_json, 5, 0)) + + display.vvv(u"GA return: {0}".format(result_handle), host=self._host) + + # TODO(odyssey4me): + # Handle exception for file/path IOError + with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file: + for chunk in iter(partial(in_file.read, BUFSIZE), b''): + try: + request_write = { + 'execute': 'guest-file-write', + 'arguments': { + 'handle': result_handle['return'], + 'buf-b64': base64.b64encode(chunk).decode() + } + } + request_write_json = json.dumps(request_write) + + display.vvvvv(u"GA send: {0}".format(request_write_json), host=self._host) + + result_write = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_write_json, 5, 0)) + + display.vvvvv(u"GA return: {0}".format(result_write), host=self._host) + + except Exception: + traceback.print_exc() + raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + + request_close = { + 'execute': 'guest-file-close', + 'arguments': { + 'handle': result_handle['return'] + } + } + request_close_json = json.dumps(request_close) + + display.vvv(u"GA send: {0}".format(request_close_json), host=self._host) + + result_close = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_close_json, 5, 0)) + + display.vvv(u"GA return: {0}".format(result_close), host=self._host) + + def fetch_file(self, in_path, out_path): + ''' fetch a file from domain to local ''' + super(Connection, self).fetch_file(in_path, out_path) + display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._host) + + request_handle = { + 'execute': 'guest-file-open', + 'arguments': { + 'path': in_path, + 'mode': 'r' + } + } + request_handle_json = json.dumps(request_handle) + + display.vvv(u"GA send: {0}".format(request_handle_json), host=self._host) + + result_handle = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_handle_json, 5, 0)) + + display.vvv(u"GA return: {0}".format(result_handle), host=self._host) + + request_read = { + 'execute': 'guest-file-read', + 'arguments': { + 'handle': result_handle['return'], + 'count': BUFSIZE + } + } + request_read_json = json.dumps(request_read) + + display.vvv(u"GA send: {0}".format(request_read_json), host=self._host) + + with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file: + try: + result_read = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_read_json, 5, 0)) + display.vvvvv(u"GA return: {0}".format(result_read), host=self._host) + out_file.write(base64.b64decode(result_read['return']['buf-b64'])) + while not result_read['return']['eof']: + result_read = 
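# put_file above uses the same handle-based protocol: guest-file-open returns a
# handle, guest-file-write takes base64 chunks, guest-file-close releases it.
# A minimal sketch (connection URI, domain and target path are illustrative):
import base64
import json

import libvirt
import libvirt_qemu

def agent(dom, cmd):
    # one JSON round trip to the guest agent
    return json.loads(libvirt_qemu.qemuAgentCommand(dom, json.dumps(cmd), 5, 0))

conn = libvirt.open('qemu:///system')
dom = conn.lookupByName('testvm')

handle = agent(dom, {'execute': 'guest-file-open',
                     'arguments': {'path': '/tmp/hello.txt', 'mode': 'wb+'}})['return']
agent(dom, {'execute': 'guest-file-write',
            'arguments': {'handle': handle,
                          'buf-b64': base64.b64encode(b'hello\n').decode()}})
agent(dom, {'execute': 'guest-file-close', 'arguments': {'handle': handle}})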
json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_read_json, 5, 0)) + display.vvvvv(u"GA return: {0}".format(result_read), host=self._host) + out_file.write(base64.b64decode(result_read['return']['buf-b64'])) + + except Exception: + traceback.print_exc() + raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + + request_close = { + 'execute': 'guest-file-close', + 'arguments': { + 'handle': result_handle['return'] + } + } + request_close_json = json.dumps(request_close) + + display.vvv(u"GA send: {0}".format(request_close_json), host=self._host) + + result_close = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_close_json, 5, 0)) + + display.vvv(u"GA return: {0}".format(result_close), host=self._host) + + def close(self): + ''' terminate the connection; nothing to do here ''' + super(Connection, self).close() + self._connected = False diff --git a/overlay/Linux/usr/local/share/ansible/library/plugins/connection/libvirt_qemu.urls b/overlay/Linux/usr/local/share/ansible/library/plugins/connection/libvirt_qemu.urls new file mode 100644 index 0000000..8b1355c --- /dev/null +++ b/overlay/Linux/usr/local/share/ansible/library/plugins/connection/libvirt_qemu.urls @@ -0,0 +1 @@ +https://github.com/ansible-community/community.libvirt-1.0.0/plugins/connection/libvirt_qemu.py diff --git a/overlay/Linux/usr/local/share/ansible/library/selenium_test.py b/overlay/Linux/usr/local/share/ansible/library/selenium_test.py new file mode 100644 index 0000000..3c330a1 --- /dev/null +++ b/overlay/Linux/usr/local/share/ansible/library/selenium_test.py @@ -0,0 +1,750 @@ +#!/usr//local/bin/python2.sh +# -*-mode: python; indent-tabs-mode: nil; py-indent-offset: 4; coding: utf-8 -*- +# From: github.com/napalm255/ansible-selenium/library/selenium_test.py + +# (c) 2016, Brad Gibson +# +# This file is a 3rd Party module for Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +"""Ansible Selenium Module.""" + +DOCUMENTATION = ''' +--- +module: selenium_test +author: "Brad Gibson" +version_added: "2.3" +short_description: Run selenium tests +requires: [ selenium>=3.0.2 ] +description: + - Run selenium tests against provided URL. + - Use Clicks, Typing, Waiting and Assertions. +options: + url: + required: true + description: + - URL to run selenium tests against. + browser: + required: false + default: "phantomjs" + choices: [ "phantomjs", "firefox", "chrome" ] + description: + - Browser to use for testing. + browser_executable: + required: false + default: "" + browser_log: + required: false + default: "" + browser_timeout: + required: false + default: 30 + browser_service_args: + required: false + default: [] + webdriver_executable: + required: false + default: "" + webdriver_log: + required: false + default: "" + width: + required: false + default: 1024 + description: + - Browser screen width. + height: + required: false + default: 768 + description: + - Browser screen height. 
+ title: + required: false + description: + - Title to validate after initial load. + screenshot: + required: false + default: false + description: + - Enable/Disable screenshots. + screenshot_when: + required: false + default: [ "error" ] + choices: [ "all", "start", "end", "error" ] + description: + - Enable/Disable screenshots. + screenshot_type: + required: false + default: "base64" + choices: [ "base64", "file" ] + description: + - Screenshot format. + screenshot_path: + required: false + default: "/tmp" + description: + - Screenshot path. + screenshot_prefix: + required: false + default: "selenium_" + description: + - Screenshot file prefix. + implicit_wait: + required: false + default: 20 + description: + - Implicit wait value when loading webpage. + explicit_wait: + required: false + default: 2 + description: + - Explicit wait value when loading webpage. + steps: + required: true + description: + - Steps to perform. + validate_cert: + required: false + default: true + description: + - Validate SSL certificate. +''' + +EXAMPLES = ''' +# run basic check against given url +- selenium_test: url=http://www.python.org +''' + +# selenium/webdriver/remote/webdriver.py +_W3C_CAPABILITY_NAMES = frozenset([ + 'acceptInsecureCerts', + 'browserName', + 'browserVersion', + 'platformName', + 'pageLoadStrategy', + 'proxy', + 'setWindowRect', + 'timeouts', + 'unhandledPromptBehavior', +]) +_OSS_W3C_CONVERSION = { + 'acceptSslCerts': 'acceptInsecureCerts', + 'version': 'browserVersion', + 'platform': 'platformName' +} +# caps['proxy']['proxyType'] = caps['proxy']['proxyType'].lower() + +# selenium-3.9.0-py2.7.egg/selenium/webdriver/common/proxy.py +ELTS = ['autodetect', 'ftpProxy', 'httpProxy', 'proxyAutoconfigUrl', 'sslProxy', 'noProxy', 'socksProxy', 'socksUsername', 'socksPassword'] +def add_to_capabilities(self, proxyTypeString, capabilities): + """ + DIRECT = ProxyTypeFactory.make(0, 'DIRECT') # Direct connection, no proxy (default on Windows). + MANUAL = ProxyTypeFactory.make(1, 'MANUAL') # Manual proxy settings (e.g., for httpProxy). + PAC = ProxyTypeFactory.make(2, 'PAC') # Proxy autoconfiguration from URL. + RESERVED_1 = ProxyTypeFactory.make(3, 'RESERVED1') # Never used. + AUTODETECT = ProxyTypeFactory.make(4, 'AUTODETECT') # Proxy autodetection (presumably with WPAD). + SYSTEM = ProxyTypeFactory.make(5, 'SYSTEM') # Use system settings (default on Linux). + UNSPECIFIED = ProxyTypeFactory.make(6, 'UNSPECIFIED') # Not initialized (for internal use). 
+ """ + proxy_caps = {} + proxy_caps['proxyType'] = proxyTypeString + for elt in ELTS: + proxy_caps[elt] = getattr(self, elt) + capabilities['proxy'] = proxy_caps + +# pylint: disable = wrong-import-position +import sys,os # noqa +try: + if sys.version_info < (3, 0): + from urlparse import urlparse # noqa + else: + from urllib.parse import urlparse # noqa + URLPARSE_INSTALLED_ERR = '' +except ImportError as e: + URLPARSE_INSTALLED_ERR = 'ERROR: selenium_test.py urllib.parse import urlparse not imported.\n' + str(e) + +from ansible.module_utils.basic import AnsibleModule, jsonify # noqa +try: + from selenium import webdriver + from selenium.webdriver.common.keys import Keys + from selenium.webdriver.common.by import By + from selenium.webdriver.support.ui import WebDriverWait + from selenium.webdriver.support import expected_conditions as EC + from selenium.common.exceptions import NoSuchElementException, TimeoutException + SELENIUM_INSTALLED_ERR = '' +except ImportError as e: + SELENIUM_INSTALLED_ERR = 'ERROR: selenium_test.py selenium not imported.\n' + str(e) + + +class AnsibleSelenium(object): + """Ansible Selenium Class.""" + + def __init__(self, module): + """Init.""" + self.module = module + self.arg = lambda: None + for arg in self.module.params: + setattr(self.arg, arg, self.module.params[arg]) + + self.steps_num = len(self.arg.steps) - 1 + self.result = {'changed': False, + 'failed': False, + 'results': { + 'steps': [], + 'num': self.steps_num + }} + self.browser_oFd = None + self.browser = self._browser() + + def __enter__(self): + """Enter by loading website and return self.""" + # validate url + url_parsed = urlparse(self.arg.url) + if url_parsed.scheme not in ['http', 'https']: + self.failed('invalid url scheme: ' + url_parsed.scheme) + if not url_parsed.netloc: + self.failed('invalid url: empty netloc') + + # load browser + self.browser.get(self.arg.url) + self.result['browser_closed'] = False + + # validate title + if not self.arg.title: + #?mike how is this used? Set it to None if missing or blank? + self.result['results']['title'] = None + elif self.arg.title in self.browser.title: + self.result['results']['title'] = True + else: + self.result['results']['title'] = False + self.failed('title "' +self.browser.title +'" does not contain: ' + self.arg.title) + + # process steps + self.steps() + + return self + + def __exit__(self, type, value, traceback): + """Exit by closing and quitting the browser.""" + # pylint: disable = redefined-builtin + if self.browser_oFd: + self.browser_oFd.close() + self.browser_oFd = None + try: self.browser.close() + except: pass + try: self.browser.quit() + except: pass + self.result['browser_closed'] = True + + def _browser(self): + """Select browser and return object.""" + name = self.arg.browser + if 'phantomjs' in name: + return self._phantomjs() + elif 'firefox' in name: + return self._firefox() + elif 'chrome' in name: + return self._chrome() + else: + raise RuntimeError("Unrecognized browser " +name) + + def _dRaw_proxy(self): + # FixMe? 
socks_proxy + dRaw = dict() + # SYSTEM + dRaw["proxyType"] = "MANUAL" + if ('no_proxy' in os.environ and os.environ['no_proxy']): + dRaw['noProxy'] = os.environ['no_proxy'] + else: + dRaw['noProxy'] = 'localhost,127.0.0.1' + + if os.environ['http_proxy']: + o = urlparse(os.environ['http_proxy']) + if o.scheme not in ['http', 'https']: + self.failed('invalid http_proxy url scheme: ' + o.scheme) + if not o.netloc: + self.failed('invalid http_proxy url: empty netloc') + PROXY_HOST, PROXY_PORT = o.netloc.split(':') + # 'http://' + + dRaw['httpProxy'] = PROXY_HOST +':' +PROXY_PORT + if os.environ['https_proxy']: + o = urlparse(os.environ['https_proxy']) + if o.scheme not in ['http', 'https']: + self.failed('invalid https_proxy url scheme: ' + o.scheme) + if not o.netloc: + self.failed('invalid https_proxy url: empty netloc') + PROXY_HOST, PROXY_PORT = o.netloc.split(':') + # 'https://' + + dRaw['sslProxy'] = PROXY_HOST +':' +PROXY_PORT + return dRaw + + def _phantomjs(self): + """Use PhantomJS browser.""" +# Adding the list of possibilities (integer values) for the "network.proxy.type". +# 0 - Direct connection (or) no proxy. +# 1 - Manual proxy configuration +# 2 - Proxy auto-configuration (PAC). +# 4 - Auto-detect proxy settings. +# 5 - Use system proxy settings. + from selenium.webdriver.common.desired_capabilities import DesiredCapabilities + # DesiredCapabilities.PHANTOMJS.copy() + import warnings + warnings.filterwarnings("ignore", "Selenium support for PhantomJS has been deprecated.*", UserWarning) + dCapabilities = { + "browserName": "phantomjs", + "browserVersion": "", + "platform": "ANY", + "javascriptEnabled": True, + } + if not self.arg.browser_executable: + self.arg.browser_executable = 'phantomjs' + service_args = self.arg.browser_service_args + service_args += ['--ssl-protocol=any','--web-security=no'] + if ('http_proxy' in os.environ and os.environ['http_proxy']) or \ + ('https_proxy' in os.environ and os.environ['https_proxy']): + dRaw = self._dRaw_proxy() + dCapabilities['proxy'] = dRaw + # bogus - old + # --proxy-type= 'http' (default), 'none' (disable completely), or 'socks5' + service_args += ['--proxy='+dRaw['sslProxy'], '--proxy-type='+'http'] + # --ssl-protocol=TLSv1.2 + # --ssl-certificates-path= the location for custom CA certificates (if none set, uses SSL_CERT_DIR) + if not self.arg.validate_cert: + service_args.append('--ignore-ssl-errors=true') + if False: + # Starts in 'Remote WebDriver mode' (embedded GhostDriver): '[[:]]' (default '127.0.0.1:8910') + service_args.append('----webdriver=' +'127.0.0.1:8910') + # WebDriver Logging Level: (supported: 'ERROR', 'WARN', 'INFO', 'DEBUG') (default 'INFO') (NOTE: needs '--webdriver') + service_args.append('----webdriver-loglevel' +'INFO') + + if self.arg.webdriver_log: + service_args += ['--webdriver-logfile='+self.arg.webdriver_log] + + # service_args : A List of command line arguments to pass to PhantomJS + driver = webdriver.PhantomJS(service_args=service_args, + desired_capabilities=dCapabilities) + driver.set_window_size(self.arg.width, self.arg.height) + driver.set_page_load_timeout(self.arg.implicit_wait) + return driver + + def _firefox(self): + """Use Firefox browser.""" + # pylint: disable = no-self-use + dCapabilities = { + "browserName": "firefox", + "acceptInsecureCerts": True, + } + if self.arg.validate_cert: + dCapabilities["acceptInsecureCerts"] = False + + # from selenium.webdriver.firefox.firefox_profile import FirefoxProfile + # firefox_profile = FirefoxProfile(profile_directory=None) + # 
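# _dRaw_proxy above just reshapes $http_proxy/$https_proxy/$no_proxy into the
# manual-proxy capability dict Selenium expects; a condensed sketch of that
# mapping (environment values are illustrative, Python 3 urlparse shown):
import os
from urllib.parse import urlparse

host_port = urlparse(os.environ.get('https_proxy', 'http://127.0.0.1:3128')).netloc
raw = {
    'proxyType': 'MANUAL',
    'httpProxy': host_port,
    'sslProxy': host_port,
    'noProxy': os.environ.get('no_proxy', 'localhost,127.0.0.1'),
}
capabilities = {'browserName': 'firefox', 'proxy': raw}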
warnings.warn("Please use FirefoxOptions to set browser profile", DeprecationWarning, + + if ('http_proxy' in os.environ and os.environ['http_proxy']) or \ + ('https_proxy' in os.environ and os.environ['https_proxy']): + dRaw = self._dRaw_proxy() + from selenium.webdriver.common.proxy import Proxy + dRaw["proxyType"] = "MANUAL" + oProxy = Proxy(raw=dRaw) + #dRawCopy = dRaw.copy() + #dRawCopy["proxyType"] = "MANUAL".lower() + #? dCapabilities['proxy'] = dRawCopy + else: + oProxy = None + + from selenium.webdriver.firefox.firefox_binary import FirefoxBinary + if not self.arg.browser_executable: + # their code looks for firefox or iceweasel + self.arg.browser_executable = 'firefox' + # should use FirefoxBinary.which() to make sure its on the PATH + self.arg.browser_executable = which(self.arg.browser_executable) + assert self.arg.browser_executable + + # A file object to redirect the firefox process output to. It can be sys.stdout. + # morons: not a file + if not self.arg.browser_log: + self.browser_oFd = None + else: + self.browser_oFd = open(self.arg.browser_log, 'w') + + firefox_binary = FirefoxBinary(firefox_path=self.arg.browser_executable, log_file=self.browser_oFd) + # firefox_binary.add_command_line_options() + + if self.arg.webdriver_executable == 'legacy': + dCapabilities["marionette"] = False + self.arg.webdriver_executable = '' + else: + dCapabilities["marionette"] = True + if not self.arg.webdriver_executable: + self.arg.webdriver_executable = 'geckodriver' + if self.arg.webdriver_log: + log_path = self.arg.webdriver_log + else: + log_path = "geckodriver.log" + self.arg.webdriver_executable = which(self.arg.webdriver_executable) + assert self.arg.webdriver_executable + + # options overrides capabilities + # As some of the options, such as `firefox_profile` and + # `options.profile` are mutually exclusive, precedence is + # given from how specific the setting is. `capabilities` is the + # least specific keyword argument, followed by `options`, + # followed by `firefox_binary` and `firefox_profile`. 
+ + #geckodriver + # -v Log level verbosity (-v for debug and -vv for trace level) + # -b, --binary Path to the Firefox binary + # --log Set Gecko log level [possible values: fatal, error, warn, info, config, debug, trace] + service_args = self.arg.browser_service_args + + kwargs = dict(firefox_profile=None, + firefox_binary=firefox_binary, + timeout=self.arg.browser_timeout, + capabilities=dCapabilities, + proxy=oProxy, + executable_path=self.arg.webdriver_executable, + options=None, + log_path=log_path, + firefox_options=None, + service_args=service_args) + driver = webdriver.Firefox(**kwargs) + return driver + + def _chrome(self): + """Use Chrome browser.""" + # pylint: disable = no-self-use + if not self.arg.webdriver_executable: + self.arg.webdriver_executable = "chromedriver" + if not self.arg.browser_executable: + self.arg.browser_executable = '/opt/chromium-browser/chromium-launcher.sh' + if not self.arg.browser_service_args: + self.arg.browser_service_args = None + if not self.arg.webdriver_log: + self.arg.webdriver_log = None + # options get added to desired_capabilities - desired_capabilities.update(options.to_capabilities()) + # warnings.warn('use options instead of chrome_options', DeprecationWarning) + kwargs = dict(executable_path=self.arg.webdriver_executable, + port=0, + options=None, + service_args=self.arg.browser_service_args, + desired_capabilities=None, + service_log_path=self.arg.webdriver_log, + chrome_options=None) + # selenium/webdriver/chrome/webdriver.py IGNORES the following arguments + # proxy=None, keep_alive=False, file_detector=None, to + # selenium-3.9.0-py2.7.egg/selenium/webdriver/remote/webdriver.py + + driver = webdriver.Chrome(**kwargs) + return driver + + def failed(self, msg, step=None): + """Failed.""" + # self.result['failed'] = True + self.result['msg'] = msg + self.result['failed'] = True + if step: + step['error'] = True + step['msg'] = msg + when = self.arg.screenshot_when + if 'all' in when or 'error' in when: + step['screenshot'] = self.screenshot('failed') + self.result['results']['steps'].append(step) + else: + self.result['results']['screenshot'] = self.screenshot('failed') + # cmd=cmd, rc=rc, stdout=out, stderr=err, + self.module.fail_json(**self.result) + + def screenshot(self, suffix='default'): + """Screenshot.""" + details = {} + if 'base64' in self.arg.screenshot_type: + base64 = self.browser.get_screenshot_as_base64() + details['base64'] = base64 + elif 'file' in self.arg.screenshot_type: + path = '%s/%s%s.png' % (self.arg.screenshot_path, + self.arg.screenshot_prefix, + suffix) + self.browser.get_screenshot_as_file(path) + details['file'] = path + return details + + def keys(self, step, step_result): + """Keys.""" + step_result['keys'] = False + try: + keys_method = getattr(self.browser, step['keys']['type']) + if 'text' in step['keys']: + value = step['keys']['text'] + assert value and value.lower() != 'undefined' + keys_method(step['keys']['value']).send_keys(value) + if 'key' in step['keys']: + key_type = getattr(Keys, step['keys']['key']) + assert key_type and key_type.lower() != 'undefined' + keys_method(step['keys']['value']).send_keys(key_type) + except KeyError: + self.failed('configuration failure. check syntax.', step_result) + except AttributeError: + self.failed('type error. 
check syntax.', step_result) + except NoSuchElementException: + self.failed('no such element.', step_result) + step_result['keys'] = True + return step_result + + def click(self, step, step_result): + """Click.""" + step_result['click'] = False + try: + click_method = getattr(self.browser, step['click']['type']) + click_method(step['click']['text']).click() + except KeyError: + self.failed('KeyError click configuration failure. check syntax.', step_result) + except AttributeError: + self.failed('AttributeError. check syntax.', step_result) + except NoSuchElementException: + self.failed('no such element.', step_result) + step_result['click'] = True + return step_result + + def wait_for(self, step, step_result): + """Wait for.""" + step_result['wait_for'] = False + try: + waitfor_method = getattr(EC, step['wait_for']['method']) + waitfor_type = getattr(By, step['wait_for']['type']) + waitfor_text = step['wait_for']['text'] + except KeyError: + self.failed('KeyError wait_for configuration failure. check syntax.', step_result) + except AttributeError: + self.failed('method or type error. check syntax.', step_result) + + try: + WebDriverWait(self.browser, self.arg.explicit_wait).until( + waitfor_method((waitfor_type, waitfor_text)) + ) + except TimeoutException: + self.failed('failure waiting for element.', step_result) + step_result['wait_for'] = True + return step_result + + def asserts(self, step, step_result): + """Assertions. + /usr/lib64/python2.7/site-packages/selenium/webdriver/remote/webdriver.py + def find_element_by_id(self, id_): + def find_elements_by_id(self, id_): + def find_element_by_xpath(self, xpath): + def find_elements_by_xpath(self, xpath): + def find_element_by_link_text(self, link_text): + def find_elements_by_link_text(self, text): + def find_element_by_partial_link_text(self, link_text): + def find_elements_by_partial_link_text(self, link_text): + def find_element_by_name(self, name): + def find_elements_by_name(self, name): + def find_element_by_tag_name(self, name): + def find_elements_by_tag_name(self, name): + def find_element_by_class_name(self, name): + def find_elements_by_class_name(self, name): + def find_element_by_css_selector(self, css_selector): + def find_elements_by_css_selector(self, css_selector): + def find_element(self, by=By.ID, value=None): + def find_elements(self, by=By.ID, value=None): + """ + step_result['assert'] = False + step_result['assert_results'] = [] + for aidx, item in enumerate(step['assert']): + step_result['assert_results'].append(False) + try: + assert_method = getattr(self.browser, item['type']) + assert assert_method(item['text']) + except KeyError: + self.failed('configuration failure. 
check syntax.', + step_result) + except NoSuchElementException: + self.failed('no such element: ' +item['type'] +' - ' +item['text'], step_result) + step_result['assert_results'][aidx] = True + step_result['assert'] = True + return step_result + + def steps(self): + """Loop through steps.""" + for idx, step in enumerate(self.arg.steps): + step_result = {'id': idx, + 'screenshot': 'no'} + if 'name' in step: + step_result['name'] = step['name'] + + if 'keys' in step: + step_result.update(self.keys(step, step_result)) + + if 'click' in step: + step_result.update(self.click(step, step_result)) + + if 'wait_for' in step: + step_result.update(self.wait_for(step, step_result)) + + if 'assert' in step: + step_result.update(self.asserts(step, step_result)) + + when = self.arg.screenshot_when + if self.arg.screenshot: + capture = False + if 'all' in when: + capture = True + elif 'start' in when and idx is 0: + capture = True + elif 'end' in when and idx is self.steps_num: + capture = True + + if capture: + suffix = idx + if 'name' in step: + suffix = '%s_%s' % (idx, step['name']) + step_result['screenshot'] = self.screenshot(suffix) + self.result['results']['steps'].append(step_result) + + +def which(fname): + """Returns the fully qualified path by searching Path of the given + name""" + for pe in os.environ['PATH'].split(os.pathsep): + checkname = os.path.join(pe, fname) + if os.access(checkname, os.X_OK) and not os.path.isdir(checkname): + return checkname + return None + +def test(): + # https://intoli.com/blog/running-selenium-with-headless-firefox/ + from selenium.webdriver.firefox.firefox_binary import FirefoxBinary + # Set the MOZ_HEADLESS environment variable which casues Firefox to start in headless mode. + if not 'MOZ_HEADLESS' in os.environ: os.environ['MOZ_HEADLESS'] = "1" + + # Select your Firefox binary. + firefox_path = '/usr/bin/firefox' + assert os.path.isfile(firefox_path) + binary = FirefoxBinary(firefox_path=firefox_path, log_file=sys.stdout) + # Start selenium with the configured binary. + driver = webdriver.Firefox(firefox_binary=binary) + + # Visit this webpage. + driver.get("http://localhost:3128/") + heading_element = driver.find_element_by_xpath('//*[@href="doc/"]') + if heading_element: + textContent = heading_element.get_property('textContent').strip() + print('textContent = ' +textContent) + assert textContent == 'The Polipo manual' + else: + print("Heading element not found!") + + driver.get("http://localhost:9090/") + heading_element = driver.find_element_by_xpath('//*[@href="/+history/FrontPage"]') + if heading_element: + textContent = heading_element.get_property('textContent').strip() + print('textContent = ' +textContent) + assert textContent == 'History' + else: + print("Heading element not found!") + + driver.quit() + # Ideally, we could instruct Firefox to run in headless mode by including the -headless flag when running the + # binary with something like + # binary.add_command_line_options('-headless') + # However, on current versions of Firefox (up to and including Nightly 58.0a1) running on Windows 10 this + # flag doesn?t seem to work. Luckily, we can achieve the same effect by setting the MOZ_HEADLESS environment + # variable either from the command line with set MOZ_HEADLESS=1 or from the python script itself as above. 
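# The step schema consumed by keys()/click()/wait_for()/asserts() above is a
# list of small dicts whose 'type'/'method' names are looked up with getattr on
# the webdriver, By and EC objects; an illustrative value for the module's
# "steps" option (selectors and text are made up):
steps = [
    {'name': 'wait for search box',
     'wait_for': {'method': 'visibility_of_element_located',
                  'type': 'NAME', 'text': 'q'}},
    {'name': 'search for ansible',
     'keys': {'type': 'find_element_by_name', 'value': 'q',
              'text': 'ansible', 'key': 'ENTER'}},
    {'name': 'page has results',
     'assert': [{'type': 'find_element_by_id', 'text': 'content'}]},
]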
+ return 0 + +def main(): + """Main.""" + # pylint: disable = too-many-branches + + from ansible.module_utils.basic import _load_params,_ANSIBLE_ARGS + if len(sys.argv) > 1: + if not os.path.isfile(sys.argv[1]): + raise RuntimeError('ERROR: ' +'file not found ' +sys.argv[1]) + + # this allows us to leave stdin alone for pdb + try: + with open(sys.argv[1], 'rb') as fd: + _ANSIBLE_ARGS = fd.read() + except Exception as e: + raise RuntimeError('ERROR: ' +'file not read ' +repr(sys.argv) +str(e)) + + try: + module = AnsibleModule( + # check_invalid_arguments=False, + argument_spec=dict( + url=dict(type='str', required=True), + browser=dict(type='str', default='phantomjs', + choices=['phantomjs', 'firefox', 'chrome']), + browser_executable=dict(type='path', default=""), + browser_log=dict(type='path', default=""), + browser_timeout=dict(type='int', default=30), + browser_service_args=dict(type='list', default=[]), + # For firefox - if this is "legacy" marionette will not be used - + # The legacy driver provided and maintained by the Selenium project + # doesn't work for Firefox 48 or higher, and will never work for newer versions of Firefox. + webdriver_executable=dict(type='path', default=""), + webdriver_log=dict(type='str', default=""), + width=dict(type='int', default=1024), + height=dict(type='int', default=768), + title=dict(type='str', default=""), + screenshot=dict(type='bool', default=False), + screenshot_when=dict(type='list', default=['error']), + screenshot_type=dict(type='str', default='base64', + choices=['file', 'base64']), + screenshot_path=dict(type='str', default='/tmp'), + screenshot_prefix=dict(type='str', default='selenium_'), + steps=dict(type='list', required=True), + explicit_wait=dict(type='int', default=2), + implicit_wait=dict(type='int', default=20), + validate_cert=dict(type='bool', default=True), + ), + supports_check_mode=False + ) + except Exception as e: + results = {'failed': True, 'msg': 'ERROR: selenium_test.py AnsibleModule failed ' +str(e)} + print('\n' +jsonify(results)) + sys.exit(1) + + # check urlparse dependency + if URLPARSE_INSTALLED_ERR: + dArgs=dict(error=True, msg=URLPARSE_INSTALLED_ERR) + module.fail_json(**dArgs) + # check selenium dependency + if SELENIUM_INSTALLED_ERR: + dArgs=dict(error=True, msg=SELENIUM_INSTALLED_ERR) + module.fail_json(**dArgs) + try: + # initiate module + results = {'failed': True, 'msg': 'ERROR: selenium_test.py something went wrong'} + with AnsibleSelenium(module) as sel: + results = sel.result + module.exit_json(**results) + except Exception as e: + results = {'failed': True, 'msg': 'ERROR: selenium_test.py AnsibleSelenium failed ' +str(e)} + print('\n' +jsonify(results)) + sys.exit(2) + +if __name__ == '__main__': + if len(sys.argv) > 1: + if sys.argv[1] == 'test': + i = test() + sys.exit(i) + main() + +# [ -f selenium_test.json ] || yaml2json.bash < selenium_test.json > selenium_test.json +# python2.bash selenium_test.py selenium_test.json +# old -version 3.141 with manifest +# /usr/local/lib64/python2.7/site-packages/selenium/webdriver/firefox/webdriver.xpi diff --git a/overlay/Linux/usr/local/share/ansible/library/selenium_test.py.dst b/overlay/Linux/usr/local/share/ansible/library/selenium_test.py.dst new file mode 100755 index 0000000..9ee0814 --- /dev/null +++ b/overlay/Linux/usr/local/share/ansible/library/selenium_test.py.dst @@ -0,0 +1,402 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# (c) 2016, Brad Gibson +# +# This file is a 3rd Party module for Ansible +# +# Ansible is free software: you can 
redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +"""Ansible Selenium Module.""" + +DOCUMENTATION = ''' +--- +module: selenium_test +author: "Brad Gibson" +version_added: "2.3" +short_description: Run selenium tests +requires: [ selenium>=3.0.2 ] +description: + - Run selenium tests against provided URL. + - Use Clicks, Typing, Waiting and Assertions. +options: + url: + required: true + description: + - URL to run selenium tests against. + browser: + required: false + default: "phantomjs" + choices: [ "phantomjs", "firefox", "chrome" ] + description: + - Browser to use for testing. + width: + required: false + default: 1024 + description: + - Browser screen width. + height: + required: false + default: 768 + description: + - Browser screen height. + title: + required: false + description: + - Title to validate after initial load. + screenshot: + required: false + default: false + description: + - Enable/Disable screenshots. + screenshot_when: + required: false + default: [ "error" ] + choices: [ "all", "start", "end", "error" ] + description: + - Enable/Disable screenshots. + screenshot_type: + required: false + default: "base64" + choices: [ "base64", "file" ] + description: + - Screenshot format. + screenshot_path: + required: false + default: "/tmp" + description: + - Screenshot path. + screenshot_prefix: + required: false + default: "selenium_" + description: + - Screenshot file prefix. + implicit_wait: + required: false + default: 20 + description: + - Implicit wait value when loading webpage. + explicit_wait: + required: false + default: 2 + description: + - Explicit wait value when loading webpage. + steps: + required: true + description: + - Steps to perform. + validate_cert: + required: false + default: true + description: + - Validate SSL certificate. 
+''' + +EXAMPLES = ''' +# run basic check against given url +- selenium_test: url=http://www.python.org +''' + +# pylint: disable = wrong-import-position +import sys # noqa +try: + if sys.version_info < (3, 0): + from urlparse import urlparse # noqa + else: + from urllib.parse import urlparse # noqa + URLPARSE_INSTALLED = True +except ImportError: + URLPARSE_INSTALLED = False + +from ansible.module_utils.basic import AnsibleModule # noqa +try: + from selenium import webdriver + from selenium.webdriver.common.keys import Keys + from selenium.webdriver.common.by import By + from selenium.webdriver.support.ui import WebDriverWait + from selenium.webdriver.support import expected_conditions as EC + from selenium.common.exceptions import NoSuchElementException + from selenium.common.exceptions import TimeoutException + SELENIUM_INSTALLED = True +except ImportError: + SELENIUM_INSTALLED = False + + +class AnsibleSelenium(object): + """Ansible Selenium Class.""" + + def __init__(self, module): + """Init.""" + self.module = module + self.arg = lambda: None + for arg in self.module.params: + setattr(self.arg, arg, self.module.params[arg]) + + self.steps_num = len(self.arg.steps) - 1 + self.result = {'changed': False, + 'failed': False, + 'results': { + 'steps': [], + 'num': self.steps_num + }} + + self.browser = self._browser() + + def __enter__(self): + """Enter by loading website and return self.""" + # validate url + url_parsed = urlparse(self.arg.url) + if url_parsed.scheme not in ['http', 'https']: + self.failed('invalid url.') + if not url_parsed.netloc: + self.failed('invalid url.') + + # load browser + self.browser.get(self.arg.url) + self.result['browser_closed'] = False + + # validate title + if self.arg.title in self.browser.title: + self.result['results']['title'] = True + else: + self.result['results']['title'] = False + self.failed('title does not match.') + + # process steps + self.steps() + + return self + + def __exit__(self, type, value, traceback): + """Exit by closing and quitting the browser.""" + # pylint: disable = redefined-builtin + self.browser.close() + self.browser.quit() + self.result['browser_closed'] = True + + def _browser(self): + """Select browser and return object.""" + name = self.arg.browser + if 'phantomjs' in name: + return self._phantomjs() + elif 'firefox' in name: + return self._firefox() + elif 'chrome' in name: + return self._chrome() + + def _phantomjs(self): + """Use PhantomJS browser.""" + service_args = ['--ssl-protocol=any'] + if not self.arg.validate_cert: + service_args.append('--ignore-ssl-errors=true') + driver = webdriver.PhantomJS(service_args=service_args) + driver.set_window_size(self.arg.width, self.arg.height) + driver.set_page_load_timeout(self.arg.implicit_wait) + return driver + + def _firefox(self): + """Use Firefox browser.""" + # pylint: disable = no-self-use + driver = webdriver.Firefox() + return driver + + def _chrome(self): + """Use Chrome browser.""" + # pylint: disable = no-self-use + driver = webdriver.Chrome() + return driver + + def failed(self, msg, step=None): + """Failed.""" + # self.result['failed'] = True + self.result['msg'] = msg + if step: + step['error'] = True + step['msg'] = msg + when = self.arg.screenshot_when + if 'all' in when or 'error' in when: + step['screenshot'] = self.screenshot('failed') + self.result['results']['steps'].append(step) + else: + self.result['results']['screenshot'] = self.screenshot('failed') + self.module.fail_json(**self.result) + + def screenshot(self, suffix='default'): + 
"""Screenshot.""" + details = {} + if 'base64' in self.arg.screenshot_type: + base64 = self.browser.get_screenshot_as_base64() + details['base64'] = base64 + elif 'file' in self.arg.screenshot_type: + path = '%s/%s%s.png' % (self.arg.screenshot_path, + self.arg.screenshot_prefix, + suffix) + self.browser.get_screenshot_as_file(path) + details['file'] = path + return details + + def keys(self, step, step_result): + """Keys.""" + step_result['keys'] = False + try: + keys_method = getattr(self.browser, step['keys']['type']) + if 'text' in step['keys']: + value = step['keys']['text'] + keys_method(step['keys']['value']).send_keys(value) + if 'key' in step['keys']: + key_type = getattr(Keys, step['keys']['key']) + keys_method(step['keys']['value']).send_keys(key_type) + except KeyError: + self.failed('configuration failure. check syntax.', step_result) + except AttributeError: + self.failed('type error. check syntax.', step_result) + except NoSuchElementException: + self.failed('no such element.', step_result) + step_result['keys'] = True + return step_result + + def click(self, step, step_result): + """Click.""" + step_result['click'] = False + try: + click_method = getattr(self.browser, step['click']['type']) + click_method(step['click']['text']).click() + except KeyError: + self.failed('configuration failure. check syntax.', step_result) + except AttributeError: + self.failed('type error. check syntax.', step_result) + except NoSuchElementException: + self.failed('no such element.', step_result) + step_result['click'] = True + return step_result + + def wait_for(self, step, step_result): + """Wait for.""" + step_result['wait_for'] = False + try: + waitfor_method = getattr(EC, step['wait_for']['method']) + waitfor_type = getattr(By, step['wait_for']['type']) + waitfor_text = step['wait_for']['text'] + except KeyError: + self.failed('configuration failure. check syntax.', step_result) + except AttributeError: + self.failed('method or type error. check syntax.', step_result) + + try: + WebDriverWait(self.browser, self.arg.explicit_wait).until( + waitfor_method((waitfor_type, waitfor_text)) + ) + except TimeoutException: + self.failed('failure waiting for element.', step_result) + step_result['wait_for'] = True + return step_result + + def asserts(self, step, step_result): + """Assertions.""" + step_result['assert'] = False + step_result['assert_results'] = [] + for aidx, item in enumerate(step['assert']): + step_result['assert_results'].append(False) + try: + assert_method = getattr(self.browser, item['type']) + assert assert_method(item['text']) + except KeyError: + self.failed('configuration failure. 
check syntax.', + step_result) + except NoSuchElementException: + self.failed('no such element.', step_result) + step_result['assert_results'][aidx] = True + step_result['assert'] = True + return step_result + + def steps(self): + """Loop through steps.""" + for idx, step in enumerate(self.arg.steps): + step_result = {'id': idx, + 'screenshot': 'no'} + if 'name' in step: + step_result['name'] = step['name'] + + if 'keys' in step: + step_result.update(self.keys(step, step_result)) + + if 'click' in step: + step_result.update(self.click(step, step_result)) + + if 'wait_for' in step: + step_result.update(self.wait_for(step, step_result)) + + if 'assert' in step: + step_result.update(self.asserts(step, step_result)) + + when = self.arg.screenshot_when + if self.arg.screenshot: + capture = False + if 'all' in when: + capture = True + elif 'start' in when and idx is 0: + capture = True + elif 'end' in when and idx is self.steps_num: + capture = True + + if capture: + suffix = idx + if 'name' in step: + suffix = '%s_%s' % (idx, step['name']) + step_result['screenshot'] = self.screenshot(suffix) + self.result['results']['steps'].append(step_result) + + +def main(): + """Main.""" + # pylint: disable = too-many-branches + module = AnsibleModule( + argument_spec=dict( + url=dict(type='str', required=True), + browser=dict(type='str', default='phantomjs', + choices=['phantomjs', 'firefox', 'chrome']), + width=dict(type='int', default=1024), + height=dict(type='int', default=768), + title=dict(type='str'), + screenshot=dict(type='bool', default=False), + screenshot_when=dict(type='list', default=['error']), + screenshot_type=dict(type='str', default='base64', + choices=['file', 'base64']), + screenshot_path=dict(type='str', default='/tmp'), + screenshot_prefix=dict(type='str', default='selenium_'), + steps=dict(type='list', required=True), + explicit_wait=dict(type='int', default=2), + implicit_wait=dict(type='int', default=20), + validate_cert=dict(type='bool', default=True), + ), + supports_check_mode=False + ) + + # check urlparse dependency + if not URLPARSE_INSTALLED: + module.fail_json(msg='urlparse not installed.') + + # check selenium dependency + if not SELENIUM_INSTALLED: + module.fail_json(msg='selenium not installed.') + + # initiate module + results = {'failed': True, 'msg': 'something went wrong'} + with AnsibleSelenium(module) as sel: + results = sel.result + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/overlay/Linux/usr/local/share/ansible/library/selenium_test.py.urls b/overlay/Linux/usr/local/share/ansible/library/selenium_test.py.urls new file mode 100644 index 0000000..c716799 --- /dev/null +++ b/overlay/Linux/usr/local/share/ansible/library/selenium_test.py.urls @@ -0,0 +1 @@ +https://github.com/napalm255/ansible-selenium diff --git a/overlay/Linux/usr/local/share/ansible/library/test_test.err b/overlay/Linux/usr/local/share/ansible/library/test_test.err new file mode 100644 index 0000000..08de668 --- /dev/null +++ b/overlay/Linux/usr/local/share/ansible/library/test_test.err @@ -0,0 +1,222 @@ +which firefox || exit 0 +/usr/bin/firefox +/var/local/bin/python2.bash selenium_test.py test +Traceback (most recent call last): + File "selenium_test.py", line 743, in + i = test() + File "selenium_test.py", line 638, in test + driver = webdriver.Firefox(firefox_binary=binary) + File "/var/local/lib/python2.7/site-packages/selenium-4.0.0a6.post2-py2.7.egg/selenium/webdriver/firefox/webdriver.py", line 191, in __init__ + keep_alive=True) + File 
"/var/local/lib/python2.7/site-packages/selenium-4.0.0a6.post2-py2.7.egg/selenium/webdriver/remote/webdriver.py", line 183, in __init__ + self.start_session(capabilities, browser_profile) + File "/var/local/lib/python2.7/site-packages/selenium-4.0.0a6.post2-py2.7.egg/selenium/webdriver/remote/webdriver.py", line 280, in start_session + response = self.execute(Command.NEW_SESSION, parameters) + File "/var/local/lib/python2.7/site-packages/selenium-4.0.0a6.post2-py2.7.egg/selenium/webdriver/remote/webdriver.py", line 349, in execute + self.error_handler.check_response(response) + File "/var/local/lib/python2.7/site-packages/selenium-4.0.0a6.post2-py2.7.egg/selenium/webdriver/remote/errorhandler.py", line 204, in check_response + raise exception_class(value) +selenium.common.exceptions.WebDriverException: Message: + + + + 503 - Forwarding failure (Privoxy@localhost) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ 503 + + +

+ This is Privoxy 3.0.32 on localhost (127.0.0.1), port 3128, + disabled +

+ +
+

Forwarding failure

+

Privoxy was unable to socks5t-forward your request + http://localhost:15991/session + through 127.0.0.1: + SOCKS5 request failed

+

+

Just try again to + see if this is a temporary problem, or check your forwarding settings + and make sure that all forwarding servers are working correctly and + listening where they are supposed to be listening. +

+
+

More Privoxy:

+ +
+ +

Support and Service:

+

+ The Privoxy Team values your feedback. +

+

+ Please have a look at the User Manual to learn how to + get support or report problems. +

+ If you want to support the Privoxy Team, you can + participate + or donate. +

+ +
+ + + + +make: *** [Makefile:22: test_test] Error 1 diff --git a/overlay/Linux/usr/local/share/ansible/library/tests/add_firefox_extensions.html b/overlay/Linux/usr/local/share/ansible/library/tests/add_firefox_extensions.html new file mode 100644 index 0000000..24d8ce3 --- /dev/null +++ b/overlay/Linux/usr/local/share/ansible/library/tests/add_firefox_extensions.html @@ -0,0 +1,2735 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + How to install firefox extensions for all users + + + + + + + + + + + + + + + + + +
+[forum navigation, search box, and "Thread Tools" widgets omitted]
+
+Post #1, 05-17-2010 (Narnie):
+[SOLVED] How to install firefox extensions for all users
+
+Hello,
+
+I am wondering the "new way" to install firefox extensions for all users.
+
+From https://developer.mozilla.org/En/Command_Line_Options we can see that the "old way" is no longer available.
+
+Quote:
+    -install-global-extension and -install-global-theme have been removed from Gecko 1.9.2 and upwards.
+
+I'm using the Lucid UNE to write this and note that it has these extensions installed for every user:
+
+Quote:
+    Ubuntu Firefox Modifications
+    webfav
+
+I have several users on many computers and would like to have certain extensions installed for them, such as xmarks, add bookmark here, coolpreview, etc.
+
+I searched the entire root filesystem and didn't find any .xpi files.
+
+Can someone give me an idea on how to do this? How does Ubuntu do it?
+
+With thanks,
+Narnie
+
+Last edited by Narnie; 05-22-2010 at 06:27 PM.
+[profile and reply widgets omitted]
+
+Post #2, 05-17-2010 (fpmurphy):
See https://developer.mozilla.org/en/Installing_extensions
+
+If you want silent installation, unzip any jar you find in the extension. Otherwise the first user will be prompted to install the extension.
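
(A minimal sketch of this silent, all-users install, assuming a hypothetical example.xpi and the /usr/lib/firefox-addons/extensions directory used later in this thread; the target directory name must be the add-on's <em:id> taken from its install.rdf.)

Code:
    # hypothetical id copied out of the xpi's install.rdf
    EXT_ID='example-addon@example.org'
    sudo mkdir -p "/usr/lib/firefox-addons/extensions/${EXT_ID}"
    # an .xpi is an ordinary zip archive, so unzip unpacks it in place
    sudo unzip example.xpi -d "/usr/lib/firefox-addons/extensions/${EXT_ID}"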
+
+Last edited by fpmurphy; 05-17-2010 at 09:05 PM. Reason: Typo
+This User Gave Thanks to fpmurphy For This Post: figaro (12-14-2010)
+[profile and reply widgets omitted]
+
+Post #3, 05-17-2010 (Narnie):
+Quote:
+    Originally Posted by fpmurphy
+    See https://developer.mozilla.org/en/Installing_extensions
+    If you want silent installation, unzip any jar you find in the extension. Otherwise the first user will be prompted to install the extension.
+
+Thanks,
+
+This looks like just what I'm looking for.
+
+Narnie
+[profile and reply widgets omitted]
+
+Post #4, 05-22-2010 (Narnie):
+Attached (at the bottom) is a script I wrote to install extensions for all users.
+
+Save the script (or just untarball the attachment) to a file (I called it add_firefox_extensions).
+
+Place it in a directory found with
+
+Code:
+    echo $PATH
+
+My system is set up to look in $HOME/bin, so I put mine there.
+
+Make it executable with:
+
+Code:
+    chmod +x add_firefox_extensions
+
+Now run it for the instructions (all you need to do is pass the xpi file) and it will do all the grunt work.
+Hope it helps some.
+
+Yours,
+Narnie
+
+PS: let me know if you run into any problems.
+
+Code:
#! /bin/bash
+#
+
+
+USAGE () {
+    USG=\
+"
+___________________________________________________________________
+
+${0##*/} [-h] XPI_FILE_TO_INSTALL
+
+installs the xpi file to /usr/lib/firefox-addons/extensions which
+will enable installation the next time the user runs firefox
+
+-h    print help
+___________________________________________________________________
+"
+    LC=1
+    echo "$USG"
+    exit
+}
+
+if [ $# -lt 1 -o "$1" = -h ] ; then USAGE ; fi 
+
+setUp () {
+    if [ ! -f $EXT ] ; then
+        echo "File doesn't exist"
+        exit 1
+    fi
+    umask 0022
+    if [ -d $TMPDIR ] ; then
+        (cd $TMPDIR ; sudo rm -rf *)
+    else
+        sudo mkdir -p "$TMPDIR"
+    fi
+    echo -e "\n\nworking . . .\n\n"
+    sudo unzip "$EXT" -d "$TMPDIR" &> /dev/null
+}
+
+getID () {
+    local IFS="
+"
+    FILE="`cat $TMPDIR/install.rdf`"
+    for i in $FILE ; do
+        if echo "$i"|grep "urn:mozilla:install-manifest" &> /dev/null ; then
+            GET=true
+        fi
+        if [ "$GET" = true ] ; then
+            if echo "$i"|grep "<em:id>" ; then
+                ID=`echo "$i" | sed 's#.*<em:id>\(.*\)</em:id>.*#\1#'`
+            elif echo "$i"|grep "em:id=\"" ; then
+                ID=`echo "$i" | sed 's/.*em:id="\(.*\)".*/\1/'`
+            fi
+            if [ -n "$ID" ] ; then
+                return
+            fi
+        fi
+    done
+    return 1
+}
+
+installExtension () {
+    if [ -d "$EXTDIR/$ID" ] ; then
+        sudo rm -rvf "$EXTDIR/$ID"
+    fi
+    sudo mv -vv "$TMPDIR" "$EXTDIR/$ID"
+    if [ $? = 0 ] ; then
+        echo -e "\n\nExtension was installed\n\n"
+    else
+        echo -e "\n\nError installing extension\n\n"
+    fi
+}
+
+getPath () {
+    (
+    cd ${1%%/*}
+    pwd
+    )
+}
+
+cleanUp () {
+    if [ -d $TMPDIR ] ; then
+        sudo rm -rf $TMPDIR
+    fi
+    exit $1
+}
+
+EXT="$1"
+
+EXTDIR="/usr/lib/firefox-addons/extensions"
+TMPDIR="/tmp/ext"
+
+trap "cleanUp 1" 1 2 3 15
+
+setUp
+
+getID
+
+installExtension
+
+cleanUp
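+# Usage sketch (hypothetical add-on file name; assumes the script is saved on
+# $PATH and made executable as described above):
+#   add_firefox_extensions coolpreview.xpi
+# The extension id is read from install.rdf inside the xpi and the unpacked
+# files land in $EXTDIR/<id> (/usr/lib/firefox-addons/extensions by default).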

+
+Last edited by Narnie; 05-22-2010 at 06:24 PM.
+[profile links, "More UNIX and Linux Forum Topics" listings, and page footer omitted]