overlay/Linux/usr/local/src/ansible_gentooimgr

This commit is contained in:
emdee 2023-12-30 11:08:18 +00:00
parent fde52a9abc
commit 73c088e784
38 changed files with 8053 additions and 79 deletions

View File

@ -1,8 +1,9 @@
#!/bin/sh
ROLE=toxcore
EXE=`which pyanalyze` || exit 0
PYTHONPATH=$PWD/wrapper /usr/local/bin/python3.sh `which pyanalyze` \
PYTHONPATH=$PWD/wrapper /usr/local/bin/python3.sh $EXE \
src/ansible_gentooimgr/gentooimgr/*py \
> .pyanal.out 2>&1

View File

@ -47,17 +47,19 @@ lint::
build:: build_base
sudo $(MAKE) -$(MAKEFLAGS) build_overlay
build_base::
echo $@ "${BOX_NBD_BASE_QCOW}"
# @[ ! -f "${BOX_NBD_BASE_QCOW}" ] || exit 0 || true
# @[ "`grep nbd /proc/partitions | wc -l`" -eq 0 ] && \
# echo WARN looks like theres NO nbd mount && \
# exit 0
[ ! -f ${BOX_NBD_BASE_QCOW} ] || exit 1
build_base:: lint
echo INFO: $@ "${BOX_NBD_BASE_QCOW}"
@[ ! -f ${BOX_NBD_BASE_QCOW} ] || { \
echo WARN looks like theres already a build of \
"${BOX_NBD_BASE_QCOW}" ; exit 2 ; }
@( ! ps ax | grep -v grep | \
grep "qemu-nbd.*/dev/nbd.*${BOX_NBD_BASE_QCOW}" ) ||{ \
echo WARN looks like theres an active nbd mount of \
"${BOX_NBD_BASE_QCOW}" && exit 1 ; }
echo INFO running the toxcore role will build ${BOX_NBD_BASE_QCOW}
sudo sh ansible_local.bash --diff -i ${PWD}/hosts.yml \
-l ${LOCALHOST} -c local --verbose ${VERBOSE} \
$(ROLES) > .build-local-${LOCALHOST} 2>&1
$(ROLES) > .$@-${LOCALHOST} 2>&1
[ -f ${BOX_NBD_BASE_QCOW} ]
build_overlay::
@ -94,7 +96,7 @@ check::
check_localhost::
sudo sh ansible_local.bash --diff -i hosts.yml -l ${LOCALHOST} \
--check -c local --verbose ${VERBOSE} \
s $(ROLES) > .run-$@-${LOCALHOST} 2>&1
$(ROLES) > .$@-$@-${LOCALHOST} 2>&1
check_base::
ls ${BOX_NBD_BASE_QCOW}
@ -109,7 +111,7 @@ check_chroot::
-m setup -vvv ${YAML_CHROOT_NAME}
sudo sh ansible_local.bash --diff -i hosts.yml -l ${YAML_CHROOT_NAME} \
--check -c chroot --verbose ${VERBOSE} \
$(ROLES) > .check-${YAML_CHROOT_NAME}-${LOCALHOST} 2>&1
$(ROLES) > .$@-${YAML_CHROOT_NAME}-${LOCALHOST} 2>&1
check_overlay::
sudo /var/local/sbin/hostvms_libvirt_test_ga.bash ${INST_BOX_NAME} ls /
@ -123,19 +125,19 @@ check_overlay::
sudo virsh list | grep -q ${INST_BOX_NAME} || exit 0
sudo sh ansible_local.bash --diff -i hosts.yml -l ${INST_BOX_NAME} \
--check -c libvirt_qemu --verbose ${VERBOSE} \
$(ROLES) > .check-${INST_BOX_NAME}-${LOCALHOST} 2>&1
$(ROLES) > .$@-${INST_BOX_NAME}-${LOCALHOST} 2>&1
# Edit hosts.yml and customize this target if you are on a Debianish
devuan::
sudo sh ansible_local.bash --diff -i ${PWD}/hosts.yml \
-l devuan -c local --verbose ${VERBOSE} $(ROLES) \
> .check-${LOCALHOST} 2>&1
> .$@-${LOCALHOST} 2>&1
# Edit hosts.yml and customize this target if you are on a Gentoo
pentoo::
sudo sh ansible_local.bash --diff -i ${PWD}/hosts.yml \
-l pentoo -c local --verbose ${VERBOSE} $(ROLES) \
> .check-${LOCALHOST} 2>&1
> .$@-${LOCALHOST} 2>&1
run::
@[ ! -f ${BOX_NBD_BASE_QCOW} ] && \
@ -158,7 +160,7 @@ run_local:: lint
exit 0
sudo sh ansible_local.bash --diff -i hosts.yml -l ${LOCALHOST} \
-c local --verbose ${VERBOSE} $(ROLES) \
> .run-$@-${LOCALHOST} 2>&1
> .$@-$@-${LOCALHOST} 2>&1
run_chroot::
[ -d /mnt/gentoo/lost+found ] || exit 0
@ -178,22 +180,32 @@ run_libvirt::
-c libvirt_qemu --verbose ${VERBOSE} $(ROLES) \
> .run-${INST_BOX_NAME}-${LOCALHOST} 2>&1
# hourly is quick tests, weekly is medium tests, monthly is long tests
weekly:: test
test::
@[ -d /mnt/gentoo/lost+found ] && \
sudo $(MAKE) -$(MAKEFLAGS) $@_chroot
sudo $(MAKE) -$(MAKEFLAGS) $@_local
@[ -f ${INST_BOX_DIR}/images/${INST_BOX_NAME}.img ] && \
sudo $(MAKE) -$(MAKEFLAGS) $@_overlay
sudo $(MAKE) -$(MAKEFLAGS) $@_libvert
weekly:: test_overlay
test_overlay::
test_local::
bash .pyanal.sh &
sudo sh ansible_local.bash --diff -i ${PWD}/hosts.yml -l ${LOCALHOST} \
-c local \
--verbose ${VERBOSE} -t weekly \
$(ROLES) > .$@-${LOCALHOST} 2>&1
test_libvirt::
# bash .pyanal.sh &
# check if ${INST_BOX_NAME} is running
! sudo virsh list | grep -q ${INST_BOX_NAME} && exit 0
sudo sh ansible_local.bash --diff -i ${PWD}/hosts.yml \
-l ${INST_BOX_NAME} -c libvirt_qemu \
--verbose ${VERBOSE} -t weekly \
$(ROLES) > .check-${LOCALHOST} 2>&1
$(ROLES) > .$@-${LOCALHOST} 2>&1
veryclean:: clean
rm -f .run* .check*
clean::
find . -name \*~ -delete

View File

@ -58,10 +58,19 @@ You must set these variable in a host in hosts.yml in the linux_chroot_group:
BOX_NBD_MP: /mnt/gentoo
BOX_NBD_FILES: "/i/data/Agile/tmp/Topics/GentooImgr"
BOX_NBD_BASE_QCOW: "/g/Agile/tmp/Topics/GentooImgr/gentoo.qcow2"
This role is slow and may take an hour or more;
It will build the BOX_NBD_BASE_QCOW.
2) We build the qcow2 overlay image that we can maintain by libvirt.
As a safety feature you must create and open the qcow base image before
running the roles: the roles do not use qemu-nbd -c or qemu-nbd -d by
design. You may also choose to download the gentoo latest stage3 and
portage files to the directory specified in hosts.yml as BOX_NBD_FILES
These
2) We build the qcow2 overlay image that we can maintain by libvirt.
It is run on the host by the build_overlay Makefile target which runs
/usr/local/bin/toxcore_build_overlay_qcow.bash. It gets its parameters from
the hosts.yml file from the host called gentoo1 in the linux_libvirt_group.
## Roles
@ -144,6 +153,8 @@ On Ansibles from 2.10 and later, you will need the community plugins installed.
### ansible_local.bash
We have a script that calls ansible to run our play: ansible_local.yml
[ -l limit ]
[ -c connection ]
[ --skip comma,separated]

38
ansible.cfg Normal file
View File

@ -0,0 +1,38 @@
[defaults]
log_path = var/tmp/2023/12/30/pentoo/base_proxy_toxcore.log
callback_plugins = ./lib/plugins/
# /i/data/DevOps/net/Http/docs.ansible.com/ansible/intro_configuration.html
# http://docs.ansible.com/ansible/intro_configuration.html#command-warnings
# WTF ERROR! Invalid callback for stdout specified: yaml with libvirt - 2.10
stdout_callback: yaml
callback_whitelist = timer
# Skipping callback plugin 'timer', unable to load
# https://github.com/ansible/ansible/issues/39122
bin_ansible_callbacks = True
command_warnings = False
retry_files_enabled = False
deprecation_warnings = False
display_args_to_stdout = False
error_on_undefined_vars = True
force_color = False
forks = 5
# Ansible by default will override variables in specific precedence orders, as described in Variables.
# When a variable of higher precedence wins, it will replace the other value.
#?! hash_behaviour = merged
#! fatal: [localhost]: FAILED! => {"changed": false, "cmd": "/bin/lsblk --list --noheadings --paths --output NAME,UUID --exclude 2", "msg": "Timer expired after 30 seconds", "rc": 257}
gather_timeout = 120
internal_poll_interval=1
# This sets the interval (in seconds) of Ansible internal processes polling each other. Lower values
# improve performance with large playbooks at the expense of extra CPU load. Higher values are more
# suitable for Ansible usage in automation scenarios, when UI responsiveness is not required but CPU usage
# might be a concern. Default corresponds to the value hardcoded in 2.1:
# Fixme: should be per user
local_tmp = /var/tmp
library = ./library
nocows = 0
roles_path = ./roles
handler_includes_static = True
timeout = 60

View File

@ -2,13 +2,14 @@
# -o/--one-line
# debug can be split into controller/client
USAGE="USAGE: $0 [--verbose 0-3] [--check] [--debug port|-port|debug|ansible] [--diff] [--step] [--skip comma,separated] [--tags comma,separated] [ -l limit ] [ -c connection ] roles..."
# [--debug port|-port|debug|ansible]
USAGE="USAGE: $0 [--verbose 0-3] [--check] [--diff] [--step] [--skip comma,separated] [--tags comma,separated] [ -l limit ] [ -c connection ] roles..."
skip=
tags='untagged'
check=0
connection='local'
verbose=1
debug=
# debug=
diff=0
become=-1
limit=''

View File

@ -44,7 +44,7 @@ also=""
#
verbose=2
limit=$1
roles="proxy toxcore"
roles="proxy ansible-gentoo_install toxcore"
[ -z "$limit" ] && BOX_HOST=localhost || BOX_HOST=$limit
limit=$BOX_HOST

View File

@ -61,62 +61,66 @@
pre_tasks:
- name: "Suspicious location (.) in PATH discovered"
shell: |
echo $PATH | grep '\.:' && echo "WARN: dot is on the PATH" && exit 1
exit 0
register: dot_on_path_fact
# warning not an error - I cant see who is putting it on the PATH - a tailing :
ignore_errors: true
- block:
- name: lookup env PATH
debug: msg="{{ ansible_env.PATH }}"
when:
- dot_on_path_fact is defined
- dot_on_path_fact is failed
- name: "Suspicious location (.) in PATH discovered"
shell: |
echo $PATH | grep '\.:' && echo "WARN: dot is on the PATH" && exit 1
exit 0
register: dot_on_path_fact
# warning not an error - I cant see who is putting it on the PATH - a tailing :
ignore_errors: true
- name: "set dates"
set_fact:
DOW: 0 # Day of week - unused
DOM: "{{ ansible_date_time.day|int }}" # Day of month
DATE: "{{ansible_date_time.day}}" # +%Y-%m-%d
date_slash: "{{ ansible_date_time.date|replace('-','/') }}" # +%Y/%m/%d
date_dash: "{{ ansible_date_time.date }}" # +%Y-%m-%d
date_week_slash: "{{ ansible_date_time.year }}/{{ ansible_date_time.weeknumber }}"
date_week_dash: "{{ ansible_date_time.year }}-{{ ansible_date_time.weeknumber }}"
- name: lookup env PATH
debug: msg="{{ ansible_env.PATH }}"
when:
- dot_on_path_fact is defined
- dot_on_path_fact is failed
- debug:
msg: "{{date_slash}} ansible_connection={{ansible_connection|default('') }} ROLES={{ROLES}}"
- name: "set dates"
set_fact:
DOW: 0 # Day of week - unused
DOM: "{{ ansible_date_time.day|int }}" # Day of month
DATE: "{{ansible_date_time.day}}" # +%Y-%m-%d
date_slash: "{{ ansible_date_time.date|replace('-','/') }}" # +%Y/%m/%d
date_dash: "{{ ansible_date_time.date }}" # +%Y-%m-%d
date_week_slash: "{{ ansible_date_time.year }}/{{ ansible_date_time.weeknumber }}"
date_week_dash: "{{ ansible_date_time.year }}-{{ ansible_date_time.weeknumber }}"
- name: "hostvars[inventory_hostname]"
debug:
# |to_yaml
msg: "hostvars[inventory_hostname] {{hostvars[inventory_hostname]}}"
when: false
- debug:
msg: "{{date_slash}} ansible_connection={{ansible_connection|default('') }} ROLES={{ROLES}}"
- name: "ansible_lsb.id BOX_OS_FAMILY"
assert:
that:
- "'{{ansible_lsb.id}}' == '{{BOX_OS_NAME}}'"
success_msg: "BOX_OS_FAMILY={{BOX_OS_FAMILY}}"
fail_msg: "ON tHE WRONG BOX {{ansible_lsb.id}} "
when:
- ansible_connection != 'local'
- false # may not exist
ignore_errors: true
- name: "hostvars[inventory_hostname]"
debug:
# |to_yaml
msg: "hostvars[inventory_hostname] {{hostvars[inventory_hostname]}}"
when: false
- name: "check BOX_ANSIBLE_CONNECTIONS"
assert:
that:
- "{{ansible_connection in BOX_ANSIBLE_CONNECTIONS}}"
- name: "ansible_lsb.id BOX_OS_FAMILY"
assert:
that:
- "'{{ansible_lsb.id}}' == '{{BOX_OS_NAME}}'"
success_msg: "BOX_OS_FAMILY={{BOX_OS_FAMILY}}"
fail_msg: "ON tHE WRONG BOX {{ansible_lsb.id}} "
when:
- ansible_connection != 'local'
- ansible_lsb.id|default('')" != ''
ignore_errors: true
- name: "we will use sudo and make it a prerequisite"
shell: |
which sudo
- name: "check BOX_ANSIBLE_CONNECTIONS"
assert:
that:
- "{{ansible_connection in BOX_ANSIBLE_CONNECTIONS}}"
- name: "check ansible_python_interpreter"
shell: |
"{{ansible_python_interpreter|default('python3')}}" --version
- name: "we will use sudo and make it a prerequisite"
shell: |
which sudo || exit 1
# "check ansible_python_interpreter"
"{{ansible_python_interpreter|default('python3')}}" --version
# required
tags: always
check_mode: false
- block:
@ -175,7 +179,11 @@
# msg: ovirtsdk required for this module
ignore_errors: true
# required
tags: always
check_mode: false
when: ansible_connection == 'libvirt_qemu'
# # required?
# tags: always
# check_mode: false

View File

@ -0,0 +1,2 @@
#!/bin/sh

View File

@ -0,0 +1,17 @@
GentooIMGR Configuration Specification
======================================
---------------
Getting Started
---------------
If you want your own customized gentoo image, simply copy the gentooimgr/configs/base.json.example file:
```sh
cp gentooimgr/configs/base.json.example gentooimgr/myconfig.json
# modify your config file. It needs to be saved in the directory directly above or within gentooimgr to be
# included in the generated iso file and therefore mounted automatically; otherwise, copy it to the live image
python -m gentooimgr -c gentooimgr/myconfig.json install
```

View File

@ -0,0 +1,160 @@
GentooImgr: Gentoo Image Builder for Cloud and Turnkey ISO installers
=====================================================================
** This is a modified version of https://github.com/NucleaPeon/gentooimgr/
where we've modified the code a little to use Python logging. We can
still use it for the build stage, but we think the install stage is better
done using ansible, hence the libvirt_cloud playbook.**
GentooImgr is a python script system to build cloud images based on Gentoo Linux.
Huge thanks to https://github.com/travisghansen/gentoo-cloud-image-builder for providing a foundation to work from.
**Features:**
* This project enables easy access to building ``systemd`` or ``openrc`` -based images.
* Performs automatic download AND verification of the linux iso, stage3 tarball and portage.
* Caches the iso and stage3 .txt files for at most a day before redownloading and rechecking for new files
* Sane and readable cli commands to build, run and test.
* Step system to enable user to continue off at the same place if a step fails
* No heavy packages like rust included ** TODO
**rename to gentooimgr, upload to pip**
Preface
-------
This project was created so I could spawn off Gentoo OS templates on my Proxmox server for various services while being more efficient than many other Linux OS's and avoiding systemd.
This python module contains all the software needed to download resources, build the image, run the image in qemu, and allow for some extensibility. The built-in functionality includes a base standard gentoo image configuration as well as a cloud-init image that can be run. You can copy the .json config file and optionally a kernel .config file to configure your image to your liking and pass it in with ``--config``.
This software is in beta so please report any issues or feature requests. You are **highly** encouraged to do so.
Thanks!
Roadmap
-------
* [X] Use gentooimgr to configure and Install a Base Gentoo OS using the least amount of configuration
- Successfully built a gentoo qcow2 image that can be run in qemu, but it requires using the --dist-kernel flag
as building from source still requires some work.
* [X] Use gentooimgr to create a usable cloud image (requires --dist-kernel currently)
* [ ] Use gentooimgr to create Gentoo installations on other non-amd64/non-native architectures (ex: ppc64)
Prerequisites
-------------
* [ ] QEMU
* [ ] python3.11
* [ ] Recommended 20GB of space
* [ ] Internet Connection
* [ ] virt-sparsify (for use with `gentooimgr shrink` action)
Usage
-----
```sh
git clone https://github.com/NucleaPeon/gentooimgr.git
python -m gentooimgr build
python -m gentooimgr run
```
Once qemu is running, mount the available gentooimgr iso and run the appropriate command:
```sh
mkdir -p /mnt/gi
mount /dev/disk/by-label/gentooimgr /mnt/gi
cd /mnt/gi
python -m gentooimgr --config-cloud install
```
Configuring the cloud image will automatically bring in appropriate kernel configs (these are defined in ``gentooimgr/configs/cloud.config``).
Then perform any additional procedures, such as shrinking the img from 10G to ~3-4G
```sh
python -m gentooimgr shrink gentoo.qcow2
```
**NOTE** Due to how ``gentooimgr`` dynamically finds the most recent portage/stage3 and iso files, if multiples exist in the same directory you may have to specify them using the appropriate flag (ie: ``--iso [path-to-iso]``). Older images can be used in this manner and eventually setting those values in the .json file should be recognized by gentooimgr so there will be no need to specify them on the command line.
Extended Usage
--------------
GentooImgr is flexible in that it can be run on a live running system as well as on a livecd in qemu;
It's possible to automate a new bare-metal Gentoo installation (with some further development) simply by running the ``install`` action or equivalent command to install and set up everything.
Eventually, GentooImgr will be used to build gentoo turnkey OS images automatically.
----
One of the conveniences of this software is that it allows you to continue from where an error last occurred.
For example, if there's an issue where a package was renamed and the compile software step fails, you can edit
the package list and rerun ``python -m gentooimgr cloud-cfg`` without any arguments and it will resume the compile
step (albeit at the beginning of that step.)
There are also commands that allow you to quickly enter the livecd chroot or run commands in the chroot:
```sh
python -m gentooimgr chroot
```
Mounts/binds are handled automatically when you chroot, but you will need to ``unchroot`` after to unmount the file systems:
```sh
python -m gentooimgr chroot
# do stuff
exit
python -m gentooimgr unchroot
```
Adding Image to Proxmox
-----------------------
(Use the correct username and address to ssh/scp)
```sh
scp gentoo-[stamp].qcow2 root@proxmox:/tmp
ssh root@proxmox
# Set vmbr to your available bridge, it could be vmbr0 or vmbr1, etc.
qm create 1000 --name gentoo-templ --memory 2048 --net0 virtio,bridge=vmbr0
qm importdisk 1000 /tmp/gentoo-[stamp].qcow2 local -format qcow2
qm set 1000 --scsihw virtio-scsi-pci --scsi0 /var/lib/vz/images/1000/vm-1000-disk-0.qcow2
qm set 1000 --ide2 local:cloudinit --boot c --bootdisk scsi0 --serial0 socket --vga serial0
qm resize 1000 scsi0 +20G
qm set 1000 --ipconfig0 ip=dhcp
qm set 1000 --sshkey ~/.ssh/id_rsa.pub
qm template 1000
(Creating a template from this image requires clicking the "Regenerate Image" button or equivalent cli command,
after you set username and password)
```
Caveats
--------
* [X] Forced use of Rust in cloud images (cloud-init dependency)
Unfortunately, using cloud-init brings in cryptography and oauthlib which pulls in rust. Any cloud images therefore are forced to use it, which is a large compilation target if rust-bin is not used. Some FOSS users, myself included, do not want rust installed on their systems and dislike how it is encroaching on so many FOSS areas.
Work may be done to see if this can be avoided, but for now consider it a requirement.
TODO
----
* [ ] Hash check portage downloads on ``build``
* [ ] have a way to set the iso creation to either ignore things not set in the config file, or have options to include dirs, etc.
* [ ] --skip-update-check : Do not even attempt to download new files in any capacity, simply use the latest ones found.
We could implement a way to find by glob and filter by modified by state and simply use the latest modified file
each and every time so we don't fail on multiple file detections

View File

@ -0,0 +1,5 @@
import os
# Absolute path of this package directory; used to locate bundled resources.
HERE = os.path.abspath(os.path.dirname(__file__))
import logging
# Package-wide shared logger; __main__ installs coloredlogs or basicConfig on it.
LOG = logging.getLogger('GI ')
logging.basicConfig(level=logging.INFO) # oArgs.loglevel) #

View File

@ -0,0 +1,185 @@
import os
import sys
import json
import argparse
import pathlib
import copy
import logging
try:
import coloredlogs
os.environ['COLOREDLOGS_LEVEL_STYLES'] = 'spam=22;debug=28;verbose=34;notice=220;warning=202;success=118,bold;error=124;critical=background=red'
except ImportError as e:
logging.log(logging.DEBUG, f"coloredlogs not available: {e}")
coloredlogs = None
from gentooimgr import LOG
import gentooimgr.common
import gentooimgr.config
import gentooimgr.configs
def main(args):
    '''Gentoo Cloud Image Builder Utility

    Dispatches on ``args.action`` (set by the argparse sub-command) and runs
    the matching gentooimgr module. Each branch imports its module lazily so
    unused actions cost nothing at startup.

    :Returns: 0 on completion (falls through for unknown actions as well).
    '''
    import gentooimgr.config
    # Resolve the effective json configuration (custom -c file or a named preset).
    configjson = gentooimgr.config.determine_config(args)
    # NOTE(review): prefix is assigned but never used in this function — confirm intent.
    prefix = args.temporary_dir
    LOG.info(f'Gentoo Cloud Image Builder Utility {args.action}')
    if args.action == "build":
        import gentooimgr.builder
        gentooimgr.builder.build(args, configjson)
    elif args.action == "run":
        import gentooimgr.run
        gentooimgr.run.run(args, configjson)
    elif args.action == "test":
        # empty — importing the module is the whole action for now
        import gentooimgr.test
    elif args.action == "clean":
        # empty — importing the module is the whole action for now
        import gentooimgr.clean
    elif args.action == "status":
        import gentooimgr.status
        gentooimgr.status.print_template(args, configjson)
    elif args.action == "install":
        import gentooimgr.install
        gentooimgr.install.configure(args, configjson)
    elif args.action == "command":
        import gentooimgr.command
        gentooimgr.command.command(configjson)
    elif args.action == "chroot":
        import gentooimgr.chroot
        gentooimgr.chroot.chroot(path=args.mountpoint, shell="/bin/bash")
    elif args.action == "unchroot":
        import gentooimgr.chroot
        gentooimgr.chroot.unchroot(path=args.mountpoint)
    elif args.action == "shrink":
        import gentooimgr.shrink
        fname = gentooimgr.shrink.shrink(args, configjson, stamp=args.stamp)
        print(f"Shrunken image at {fname}, {os.path.getsize(fname)}")
    elif args.action == "kernel":
        import gentooimgr.kernel
        gentooimgr.kernel.build_kernel(args, configjson)
    return 0
if __name__ == "__main__":
"""Gentoo Cloud Image Builder Utility"""
parser = argparse.ArgumentParser(prog="gentooimgr", description="Gentoo Image Builder Utility")
parser.add_argument("-c", "--config", nargs='?', type=pathlib.Path,
help="Path to a custom conf file")
parser.add_argument("--config-cloud", action="store_const", const="cloud.json", dest="config",
help="Use cloud init configuration")
parser.add_argument("--config-base", action="store_const", const="base.json", dest="config",
help="Use a minimal base Gentoo configuration")
parser.add_argument("-t", "--temporary-dir", nargs='?', type=pathlib.Path,
default=pathlib.Path(os.getcwd()), help="Path to temporary directory for downloading files")
parser.add_argument("-j", "--threads", type=int, default=gentooimgr.config.THREADS,
help="Number of threads to use for building and emerging software")
parser.add_argument("-l", "--loglevel", type=int, default=logging.INFO,
help="python logging level <= 50, INFO=20")
parser.add_argument("-y", "--days", type=int, default=7, # gentooimgr.config.DAYS
help="Number of days before the files are redownloaded")
parser.add_argument("-d", "--download-dir", type=pathlib.Path, default=os.getcwd(),
help="Path to the desired download directory (default: current)")
parser.add_argument("--openrc", dest="profile", action="store_const", const="openrc",
help="Select OpenRC as the Gentoo Init System")
parser.add_argument("--systemd", dest="profile", action="store_const", const="systemd",
help="Select SystemD as the Gentoo Init System")
parser.add_argument("-f", "--force", action="store_true",
help="Let action occur at potential expense of data loss or errors (applies to clean and cloud-cfg)")
parser.add_argument("--format", default="qcow2", help="Image format to generate, default qcow2")
parser.add_argument("--portage", default=None, type=pathlib.Path, nargs='?',
help="Extract the specified portage package onto the filesystem")
parser.add_argument("--stage3", default=None, type=pathlib.Path, nargs='?',
help="Extract the specified stage3 package onto the filesystem")
parser.add_argument("--kernel-dir", default="/usr/src/linux",
help="Where kernel is specified. By default uses the active linux kernel")
subparsers = parser.add_subparsers(help="gentooimgr actions", dest="action")
subparsers.required = True
# Build action
parser_build = subparsers.add_parser('build', help="Download and verify all the downloaded components for cloud image")
parser_build.add_argument("image", default=gentooimgr.config.GENTOO_IMG_NAME, type=str, nargs='?',
help="Specify the exact image (date) you want to build; ex: 20231112T170154Z. Defaults to downloading the latest image. If image exists, will automatically use that one instead of checking online.")
parser_build.add_argument("--size", default="12G", help="Size of image to build")
parser_build.add_argument("--no-verify", dest="verify", action="store_false", help="Do not verify downloaded iso")
parser_build.add_argument("--verify", dest="verify", action="store_true", default=True,
help="Verify downloaded iso")
parser_build.add_argument("--redownload", action="store_true", help="Overwrite downloaded files")
parser_run = subparsers.add_parser('run', help="Run a Gentoo Image in QEMU to process it into a cloud image")
parser_run.add_argument("--iso", default=None, type=pathlib.Path, nargs='?',
help="Mount the specified iso in qemu, should be reserved for live cd images")
parser_run.add_argument("image", default=gentooimgr.config.GENTOO_IMG_NAME,
type=pathlib.Path, nargs="?",
help="Run the specified image in qemu")
parser_run.add_argument("-m", "--mounts", nargs='+', default=[],
help="Path to iso files to mount into the running qemu instance")
parser_test = subparsers.add_parser('test', help="Test whether image is a legitamite cloud configured image")
parser_clean = subparsers.add_parser('clean', help="Remove all downloaded files")
# --force also applies to clean action
parser_status = subparsers.add_parser('status', help="Review information, downloaded images and configurations")
parser_install = subparsers.add_parser("install", help="Install Gentoo on a qemu guest. Defaults to "
"--config-base with --kernel-dist if the respective --config or --kernel options are not provided.")
parser_install.add_argument("--kernel-dist", action="store_true",
help="Use a distribution kernel in the installation. Overrides all other kernel options.")
parser_install.add_argument("--kernel-virtio", action="store_true", help="Include virtio support in non-dist kernels")
parser_install.add_argument("--kernel-g5", action="store_true", help="Include all kernel config options for PowerMac G5 compatibility")
parser_chroot = subparsers.add_parser("chroot", help="Bind mounts and enter chroot with shell on guest. Unmounts binds on shell exit")
parser_chroot.add_argument("mountpoint", nargs='?', default=gentooimgr.config.GENTOO_MOUNT,
help="Point to mount and run the chroot and shell")
parser_unchroot = subparsers.add_parser("unchroot", help="Unmounts chroot filesystems")
parser_unchroot.add_argument("mountpoint", nargs='?', default=gentooimgr.config.GENTOO_MOUNT,
help="Point to mount and run the chroot and shell")
parser_cmd = subparsers.add_parser('command', help="Handle bind mounts and run command(s) in guest chroot, then unmount binds")
parser_cmd.add_argument("cmds", nargs='*',
help="Commands to run (quote each command if more than one word, ie: \"grep 'foo'\" \"echo foo\")")
parser_shrink = subparsers.add_parser('shrink', help="Take a finalized Gentoo image and rearrange it for smaller size")
parser_shrink.add_argument("img", type=pathlib.Path, help="Image to shrink")
parser_shrink.add_argument("--stamp", nargs='?', default=None,
help="By default a timestamp will be added to the image name, otherwise provide "
"a hardcoded string to add to the image name. Result: gentoo-[stamp].img")
parser_kernel = subparsers.add_parser('kernel', help="Build the kernel based on configuration and optional --kernel-dist flag")
args = parser.parse_args()
assert args.loglevel < 59
if coloredlogs:
# https://pypi.org/project/coloredlogs/
coloredlogs.install(level=args.loglevel,
logger=LOG,
# %(asctime)s,%(msecs)03d %(hostname)s [%(process)d]
fmt='%(name)s %(levelname)s %(message)s'
)
else:
logging.basicConfig(level=args.loglevel) # logging.INFO
logging.basicConfig(level=args.loglevel)
isos = gentooimgr.common.find_iso(args.download_dir)
if args.action == "run" and args.iso is None and len(isos) > 1:
LOG.error(f"Error: multiple iso files were found in {args.download_dir}, please specify one using `--iso [iso]`")
sys.exit(1)
main(args)

View File

@ -0,0 +1,26 @@
import os
import argparse
from gentooimgr import LOG
import gentooimgr.config as config
import gentooimgr.download as download
import gentooimgr.qemu as qemu
import gentooimgr.common
import requests
def build(args: argparse.Namespace, config: dict) -> str:
    """Ensure all build inputs exist (downloading any that are missing) and
    create the qemu image.

    :Parameters:
        - args: parsed CLI namespace; uses ``image`` and ``format`` here.
        - config: configuration mapping; may pre-supply ``iso`` / ``stage3`` /
          ``portage`` paths to skip the corresponding download.

    :Returns:
        Path to the created image. (The original annotated ``-> None`` while
        returning ``image``; the annotation is corrected to ``str``.)

    :Raises:
        Exception: when qemu reports an image path that does not exist on disk.
    """
    LOG.info(": build")
    # Each component falls back to a download only when absent from config.
    iso = config.get("iso") or download.download(args)
    stage3 = config.get("stage3") or download.download_stage3(args)
    portage = config.get("portage") or download.download_portage(args)
    filename = f"{args.image}.{args.format}"
    image = qemu.create_image(args, config)
    if not os.path.exists(image):
        raise Exception(f"Image {image} does not exist")
    # Only append the image name to the run hint when it differs from the default.
    is_default = os.path.basename(image) == filename
    LOG.info(image)
    LOG.info(f"Image {image} build successfully.\nRun `python -m gentooimgr run{' ' + image if not is_default else ''} --iso {iso}`")
    return image

View File

@ -0,0 +1,54 @@
import os
import sys
from subprocess import Popen, PIPE
import gentooimgr.config
def bind(mount=gentooimgr.config.GENTOO_MOUNT, verbose=True):
    """Bind-mount the pseudo filesystems needed for a functional chroot.

    Runs the mount sequence for proc, sys (rslave), dev (rslave) and run
    (slave) under ``mount``. On the first failure, writes the mount error to
    stderr and exits the process with that mount command's return code.

    :Parameters:
        - mount: chroot mountpoint to bind into.
        - verbose: when True, echo each mount command before running it.
    """
    mounts = [
        ["mount", "--types", "proc", "/proc", os.path.join(mount, "proc")],
        ["mount", "--rbind", "/sys", os.path.join(mount, "sys")],
        ["mount", "--make-rslave", os.path.join(mount, "sys")],
        ["mount", "--rbind", "/dev", os.path.join(mount, "dev")],
        ["mount", "--make-rslave", os.path.join(mount, "dev")],
        ["mount", "--bind", "/run", os.path.join(mount, "run")],
        ["mount", "--make-slave", os.path.join(mount, "run")],
    ]
    for mcmd in mounts:
        if verbose:
            print(f"\t:: {' '.join(mcmd)}")
        proc = Popen(mcmd, stdout=PIPE, stderr=PIPE)
        stdout, stderr = proc.communicate()
        if proc.returncode != 0:
            # (fixed) stderr is bytes; the original f-string printed the
            # bytes repr (b'...'), so decode it for a readable message.
            sys.stderr.write(f"{stderr.decode(errors='replace')}\n")
            sys.exit(proc.returncode)
def unbind(mount=gentooimgr.config.GENTOO_MOUNT, verbose=True):
    """Undo bind(): unmount the chroot pseudo filesystems, then the tree.

    Failures are reported and skipped rather than fatal (a busy mount should
    not prevent the remaining unmounts from being attempted).

    :Parameters:
        - mount: chroot mountpoint whose binds should be released.
        - verbose: when True, echo each umount command before running it.
    """
    os.chdir("/")  # ensure cwd is not inside the tree being unmounted
    if not os.path.exists(mount):
        sys.stderr.write(f"Mountpoint {mount} does not exist\n")
        return
    unmounts = [
        ["umount", os.path.join(mount, 'dev', 'shm')],
        ["umount", os.path.join(mount, 'dev', 'pts')],
        ["umount", "-l", os.path.join(mount, 'dev')],
        ["umount", "-R", mount]
    ]
    for uncmd in unmounts:
        if verbose:
            print(f"\t:: {' '.join(uncmd)}")
        # (fixed) the original Popen had no pipes, so communicate() returned
        # (None, None) and the failure message printed the literal "None".
        proc = Popen(uncmd, stdout=PIPE, stderr=PIPE)
        stdout, stderr = proc.communicate()
        if proc.returncode != 0:
            sys.stderr.write(f"{stderr.decode(errors='replace')}\n")
            continue
def chroot(path=gentooimgr.config.GENTOO_MOUNT, shell="/bin/bash"):
    """Bind the pseudo filesystems, chroot into ``path`` and run ``shell``.

    Blocks until the interactive shell exits, then attempts to release the
    mounts again via unchroot().
    """
    bind(mount=path)
    os.chroot(path)
    os.chdir(os.sep)
    os.system(shell)
    unchroot(path=path) # May fail if we do this automatically
def unchroot(path=gentooimgr.config.GENTOO_MOUNT):
    """Release the bind mounts previously created for a chroot at ``path``."""
    unbind(mount=path)

View File

@ -0,0 +1,14 @@
import sys
from subprocess import Popen, PIPE
import gentooimgr.chroot
def command(config, *args):
    """Run each shell command in ``args`` with the chroot binds in place.

    Stops at the first failing command. The binds are always released —
    even when a command fails or Popen raises — which the original did not
    guarantee (an exception would have skipped unbind()).

    :Parameters:
        - config: configuration mapping (currently unused by this function).
        - args: shell command strings to execute one by one.
    """
    gentooimgr.chroot.bind()
    try:
        for a in args:
            proc = Popen(a, shell=True, stdout=PIPE, stderr=PIPE)
            stdout, stderr = proc.communicate()
            if proc.returncode != 0:
                # decode: stderr is bytes; f-string would print the bytes repr
                sys.stderr.write(f"{stderr.decode(errors='replace')}\n")
                break
    finally:
        gentooimgr.chroot.unbind()

View File

@ -0,0 +1,124 @@
import os
import sys
import time
import copy
import json
from subprocess import Popen, PIPE
import gentooimgr.config
from gentooimgr import LOG
def older_than_a_day(fullpath):
    """Return True when ``fullpath`` is missing or older than the configured
    expiry window (DAY_IN_SECONDS * DAYS from gentooimgr.config)."""
    if not os.path.exists(fullpath):
        # A missing file counts as stale so callers re-download instead of failing.
        return True
    age = time.time() - os.path.getmtime(fullpath)
    expiry = gentooimgr.config.DAY_IN_SECONDS * gentooimgr.config.DAYS
    return age > expiry
def find_iso(download_dir):
    """Return full paths of all ``.iso`` files directly inside ``download_dir``.

    Matching is by exact (case-sensitive) ".iso" extension, same as before.
    The original pre-initialized ``name``/``ext`` locals that leaked out of the
    loop unused; they are removed here.
    """
    LOG.info(f"Looking for iso in {download_dir}")
    return [os.path.join(download_dir, f)
            for f in os.listdir(download_dir)
            if os.path.splitext(f)[1] == ".iso"]
def make_iso_from_dir(mydir):
    """ Generates an iso with gentooimgr inside it for use inside a live cd guest

    :Returns:
        path to iso that was created or NoneType if mydir is not found
    """
    if not os.path.exists(mydir):
        # (fixed) LOG.warn is a deprecated alias of LOG.warning
        LOG.warning(f"\t:: dir not found {mydir}")
        return
    LOG.info(f"\t:: Making ISO with dir of {mydir}")
    path = os.path.join(mydir, "..", "cloudgen.iso")
    # -J (Joliet) + -r (Rock Ridge) preserve filenames; -m excludes any
    # images/isos already present so they are not nested in the new iso.
    proc = Popen(["mkisofs",
                  "--input-charset", "utf-8",
                  "-J",
                  "-r",
                  "-V", "gentooimgr",
                  "-m", "*.img",
                  "-m", "*.iso",
                  "-o", path,
                  mydir
                  ], stdout=PIPE, stderr=PIPE)
    # NOTE(review): mkisofs' return code is not checked — a failed run still
    # returns `path`; confirm whether callers rely on this best-effort behavior.
    proc.communicate()
    return path
def portage_from_dir(download_dir, filename=None):
    """Find portage file from directory. Will do a check in os.listdir() for portage*.tar.xz.
    If a filename is provided, this function either returns that filename assuming it exists in d,
    or return None. If filename is None, this looks through all entries for portage files and if
    only one exists, returns it, otherwise None.
    """
    # (docstring previously claimed .tar.bz2; the match below is .tar.xz)
    assert download_dir, f"empty {download_dir} for portage"
    LOG.info(f"Looking for portage in {download_dir}")
    found = []
    for f in os.listdir(download_dir):
        if filename is not None:
            # Exact match only, when the caller pinned a specific file.
            if filename == f:
                found.append(f)
        elif f.startswith("portage") and f.endswith(".tar.xz"):
            found.append(f)
    if len(found) > 1:
        # Ambiguity is fatal here (unlike stage3_from_dir which returns None):
        # a stale snapshot silently produces an out-of-date image.
        LOG.error("\tEE: More than one portage file exists, please specify the exact portage file with --portage [file] or remove all others\n")
        LOG.error(''.join([f"\t{f}\n" for f in found]))
        LOG.error(f"in {download_dir}\n")
        sys.exit(1)
    return found[0] if found else None
def stage3_from_dir(d, filename=None):
    """Find stage3 file from directory. Will do a check in os.listdir() for stage3*.tar.xz.
    If a filename is provided, this function either returns that filename assuming it exists in d,
    or return None. If filename is None, this looks through all entries for stage3 files and if
    only one exists, returns it, otherwise None.
    """
    entries = os.listdir(d)
    if filename is not None:
        # Pinned name: match exactly, nothing else qualifies.
        found = [entry for entry in entries if entry == filename]
    else:
        found = [entry for entry in entries
                 if entry.startswith("stage3") and entry.endswith(".tar.xz")]
    if len(found) > 1:
        LOG.error("More than one stage3 file exists, please specify the exact stage3 file or remove all others\n")
        LOG.error(''.join([f"\t{f}\n" for f in found]))
        LOG.error(f"in {d}\n")
        return None
    return found[0] if found else None
def get_image_name(args, config):
    """Resolve the output image filename: the config's "imagename" when it is
    set, otherwise "gentoo." plus the format chosen on the command line."""
    image = config.get("imagename")
    if image is None:
        # Covers both a missing key and an explicit null in the json config.
        image = "gentoo." + args.format
    return image
#
# def load_config(args):
# cfg = generatecfg(args)
# if args.config:
# override = generatecfg(args, config=args.config)
# cfg.update(cfgoverride)
#
# if cfg.get("portage") is None:
# cfg['portage'] = portage_from_dir(args.download_dir, filename=args.portage or cfg.get("portage"))
# if cfg.get("stage3") is None:
# cfg['stage3'] = stage3_from_dir(args.download_dir, filename=args.stage3 or cfg.get("stage3"))
#
# return cfg
#

View File

@ -0,0 +1,132 @@
import os
import json
import sys
import argparse
from gentooimgr import LOG
import gentooimgr.configs
import multiprocessing
# A day in seconds:
DAY_IN_SECONDS = 60*60*24
# days until the iso is old
DAYS = 1
# Define threads to compile packages with
THREADS = multiprocessing.cpu_count()
# URL to latest image text file, defaults to amd64. This is parsed to find latest iso to download
ARCHITECTURE = "amd64"
GENTOO_BASE_ISO_URL = f"https://distfiles.gentoo.org/releases/{ARCHITECTURE}/autobuilds/current-install-{ARCHITECTURE}-minimal/"
GENTOO_BASE_STAGE_OPENRC_URL = f"https://distfiles.gentoo.org/releases/{ARCHITECTURE}/autobuilds/current-stage3-{ARCHITECTURE}-openrc/"
GENTOO_BASE_STAGE_SYSTEMD_URL = f"https://distfiles.gentoo.org/releases/{ARCHITECTURE}/autobuilds/current-stage3-{ARCHITECTURE}-systemd/"
GENTOO_LATEST_ISO_FILE = f"latest-install-{ARCHITECTURE}-minimal.txt"
GENTOO_LATEST_STAGE_OPENRC_FILE = f"latest-stage3-{ARCHITECTURE}-openrc.txt"
GENTOO_LATEST_STAGE_SYSTEMD_FILE = f"latest-stage3-{ARCHITECTURE}-systemd.txt"
GENTOO_PORTAGE_FILE = "http://distfiles.gentoo.org/snapshots/portage-latest.tar.xz" # No architecture, no txt files to determine latest.
GENTOO_MOUNT = "/mnt/gentoo"
GENTOO_IMG_NAME = "gentoo.qcow2"
GENTOO_FILE_HASH_RE = r"^Hash\: ([\w]*)$"
GENTOO_FILE_ISO_RE = r"^(install-[\w\-_\.]*.iso) ([\d]*)"
GENTOO_FILE_ISO_HASH_RE = r"^([\w]*) (install-[\w\-_\.]*.iso)$"
GENTOO_FILE_STAGE3_RE = r"^(stage3-[\w\-_\.]*.tar.*) ([\d]*)"
GENTOO_FILE_STAGE3_HASH_RE = r"^([\w]*) (stage3-[\w\-_\.]*.tar.*)$"
# TODO: Repo regex to replace attributes, use function to do so as find key will change.
def replace_repos_conf(key, value):
    """Placeholder for rewriting a single repos.conf attribute.
    Currently a no-op; not yet implemented."""
    pass
CLOUD_MODULES = [
"iscsi_tcp"
]
def load_config(path):
    """Load a JSON configuration file.

    :Returns: the parsed dict, or {} when ``path`` does not exist.
    Invalid JSON is logged and the original exception re-raised.
    """
    assert path, "load config called with nothing"
    if not os.path.exists(path):
        return {}
    with open(path, 'r') as f:
        raw = f.read()
    try:
        return json.loads(raw)
    except Exception:
        LOG.error(f"ERROR loading {path}")
        raise
def load_default_config(config_name):
    """This is called when a --config option is set. --kernel options update the resulting config, whether
    it be 'base' or other.
    If user is supplying their own configuration, this is not called.

    :Returns: dict parsed from the bundled config dir, {} for unknown names
        or unparsable JSON (parse errors are logged, not raised).
    """
    # _ext intentionally unused; only the basename selects a known config.
    name, _ext = os.path.splitext(config_name)
    if name not in gentooimgr.configs.KNOWN_CONFIGS:  # was non-idiomatic "not name in"
        return {}
    json_file = os.path.join(gentooimgr.configs.CONFIG_DIR, config_name)
    ret = {}
    with open(json_file, 'r') as f:
        try:
            ret = json.loads(f.read())
        except Exception as e:
            LOG.error(f"loading {json_file} {e}")
    return ret
def inherit_config(config: dict) -> dict:
    """Returns the json file that the inherit key specifies; will recursively update if inherit values are set.

    :Parameters:
        - config: dict whose "inherit" key names a bundled config or a path
    :Returns:
        the flattened ancestor configuration, {} when it cannot be found
    """
    configuration = load_default_config(config.get("inherit"))
    if not configuration:
        configuration = load_config(config.get("inherit"))
    if not configuration:
        sys.stderr.write(f"\tWW: Warning: Inherited configuration {config.get('inherit')} is not found.\n")
        return {}
    if configuration.get("inherit"):
        # BUG FIX: the old code recursed with the raw "inherit" *string*,
        # which crashes in .get() above. Recurse with the loaded dict, and
        # merge so the closer configuration overrides its ancestor — the same
        # direction determine_config() uses.
        ancestor = inherit_config(configuration)
        ancestor.update(configuration)
        configuration = ancestor
    return configuration
def determine_config(args: argparse.Namespace) -> dict:
    """Check argparser options and return the most valid configuration
    The "package" key/value object overrides everything that is set, it does not update() them.
    If you override "base" package set, it's exactly what you set. It makes more sense to do it this way.
    For example, if you have a dist kernel config, you don't want the base.json to update and include all
    non-dist kernel options as it would add a lot of used space for unused functionality.

    The package set is only overridden in the top level json configuration file though;
    If you have multiple inherits, those package sets will be combined before the parent package set overrides
    with the keys that are set.

    If you have base.json and base2.json that contain multiple layers of "base" packages, ie: base: ['foo'] and base2: ['bar']
    then you will have in yours.json: packages { base: ['foo', 'bar'] } and unless you set "base", that is what you'll get.

    If you check `status` action, it will flatten all configurations into one, so the "inherit" key will always be null.

    :Returns:
        - configuration from json to dict
    """
    # Check custom configuration
    configuration = load_default_config(args.config or 'base.json')
    if not configuration and args.config:
        # --config did not name a bundled config; treat it as a filesystem path.
        configuration = load_config(args.config)
        if not configuration:
            LOG.error(f"\tWW: Warning: Configuration {args.config} is empty\n")
    else:
        if configuration.get("inherit"):
            # newpkgs = configuration.get("packages", {})
            inherited = inherit_config(configuration)
            new_packages = configuration.get("packages", {})
            old_packages = inherited.get("packages", {})
            # Child keys override parent keys wholesale...
            inherited.update(configuration)
            # ...except "packages": restore the parent's dict, then override
            # only the sets the child actually filled in.
            inherited['packages'] = old_packages
            for key, pkgs in new_packages.items():
                if pkgs:
                    inherited['packages'][key] = pkgs
            return inherited
    return configuration

View File

@ -0,0 +1,153 @@
import os
CONFIG_DIR = os.path.abspath(os.path.dirname(__file__))
__all__ = ["CONFIG_DIR", "CLOUD_YAML", "HOST_TMPL", "HOSTNAME", "KNOWN_CONFIGS"]
# List of configurations that end in '.json' within the configs/ directory
KNOWN_CONFIGS = [
"base",
"cloud"
]
# Currently we handle the writing of additional files by having data defined here and checking options.
# this isn't ideal. TODO: Make this better.
CLOUD_YAML = """
# The top level settings are used as module
# and system configuration.
# A set of users which may be applied and/or used by various modules
# when a 'default' entry is found it will reference the 'default_user'
# from the distro configuration specified below
users:
- default
# If this is set, 'root' will not be able to ssh in and they
# will get a message to login instead as the above $user (ubuntu)
disable_root: true
ssh_pwauth: false
# This will cause the set+update hostname module to not operate (if true)
preserve_hostname: false
# this may be helpful in certain scenarios
# resize_rootfs_tmp: /dev
syslog_fix_perms: root:root
ssh_deletekeys: false
ssh_genkeytypes: [rsa, dsa]
# This can be 'template'
# which would look for /etc/cloud/templates/hosts.gentoo.tmpl
# or 'localhost'
# or False / commented out to disable altogether
manage_etc_hosts: template
# Example datasource config
# datasource:
# Ec2:
# metadata_urls: [ 'blah.com' ]
# timeout: 5 # (defaults to 50 seconds)
# max_wait: 10 # (defaults to 120 seconds)
# The modules that run in the 'init' stage
cloud_init_modules:
- seed_random
- bootcmd
- write-files
- growpart
- resizefs
- set_hostname
- update_hostname
- update_etc_hosts
- ca-certs
- users-groups
- ssh
# The modules that run in the 'config' stage
cloud_config_modules:
# Emit the cloud config ready event
# this can be used by upstart jobs for 'start on cloud-config'.
- disk_setup
- mounts
- ssh-import-id
- set-passwords
- package-update-upgrade-install
- timezone
- puppet
- chef
- salt-minion
- mcollective
- disable-ec2-metadata
- runcmd
# The modules that run in the 'final' stage
cloud_final_modules:
- scripts-vendor
- scripts-per-once
- scripts-per-boot
- scripts-per-instance
- scripts-user
- ssh-authkey-fingerprints
- keys-to-console
- phone-home
- final-message
- power-state-change
# System and/or distro specific settings
# (not accessible to handlers/transforms)
system_info:
# This will affect which distro class gets used
distro: gentoo
# Default user name + that default users groups (if added/used)
default_user:
name: gentoo
lock_passwd: True
gecos: Gentoo
groups: [users, wheel]
primary_group: users
no-user-group: true
sudo: ["ALL=(ALL) NOPASSWD:ALL"]
shell: /bin/bash
# Other config here will be given to the distro class and/or path classes
paths:
cloud_dir: /var/lib/cloud/
templates_dir: /etc/cloud/templates/
"""
HOST_TMPL = """
## template:jinja
{#
This file /etc/cloud/templates/hosts.gentoo.tmpl is only utilized
if enabled in cloud-config. Specifically, in order to enable it
you need to add the following to config:
manage_etc_hosts: template
-#}
# Your system has configured 'manage_etc_hosts' as 'template'.
# As a result, if you wish for changes to this file to persist
# then you will need to either
# a.) make changes to the master file in /etc/cloud/templates/hosts.gentoo.tmpl
# b.) change or remove the value of 'manage_etc_hosts' in
# /etc/cloud/cloud.cfg or cloud-config from user-data
#
# The following lines are desirable for IPv4 capable hosts
127.0.0.1 {{fqdn}} {{hostname}}
127.0.0.1 localhost.localdomain localhost
127.0.0.1 localhost4.localdomain4 localhost4
# The following lines are desirable for IPv6 capable hosts
::1 {{fqdn}} {{hostname}}
::1 localhost.localdomain localhost
::1 localhost6.localdomain6 localhost6
"""
HOSTNAME = """
# Set to the hostname of this machine
if [ -f /etc/hostname ];then
hostname=$(cat /etc/hostname 2> /dev/null | cut -d"." -f1 2> /dev/null)
else
hostname="localhost"
fi
"""

View File

@ -0,0 +1,82 @@
{
"inherit": null,
"imgsize": "20G",
"memory": 4096,
"mountpoint": "/mnt/gentoo",
"imagename": null,
"initsys": "openrc",
"licensefiles": {
"kernel": ["sys-kernel/linux-firmware linux-fw-redistributable"]
},
"kernel": {
"path": "/etc/kernels/config.d/gentooimgr-base.config"
},
"repos": {
"/etc/portage/repos.conf/gentoo.conf": {
"gentoo": {
"sync-uri": "rsync://192.168.254.20/gentoo-portage"
}
}
},
"packages": {
"base": [
"acpid",
"dmidecode",
"syslog-ng",
"cronie",
"dhcpcd",
"mlocate",
"xfsprogs",
"dosfstools",
"sudo",
"postfix",
"parted",
"portage-utils",
"gentoo-bashcomp",
"tmux",
"app-misc/screen",
"dev-vcs/git",
"net-misc/curl",
"usbutils",
"pciutils",
"logrotate",
"gptfdisk",
"sys-block/gpart",
"net-misc/ntp",
"net-fs/nfs-utils",
"app-emulation/qemu-guest-agent",
"linux-firmware"
],
"additional": ["app-editors/vim"],
"oneshots": [
"portage"
],
"singles": [
"app-portage/eix",
"dev-util/cmake"
],
"keepgoing": [
"openssh"
],
"bootloader": [
"grub:2"
],
"kernel": [
"sys-kernel/genkernel",
"gentoo-sources",
"gentoolkit"
]
},
"services": {
"syslog-ng": "default",
"cronie": "default",
"acpid": "default",
"ntp": "default",
"qemu-guest-agent": "default"
},
"iso": null,
"portage": null,
"stage3": null,
"disk": "/dev/sda",
"partition": 1
}

View File

@ -0,0 +1,77 @@
{
"inherit": null,
"imgsize": "12G",
"memory": 4096,
"mountpoint": "/mnt/gentoo",
"imagename": null,
"initsys": "openrc",
"licensefiles": {
"kernel": ["sys-kernel/linux-firmware linux-fw-redistributable"]
},
"repos": {
"/etc/portage/repos.conf/gentoo.conf": {
"sync-uri": "rsync://192.168.254.20/gentoo-portage"
}
},
"packages": {
"base": [
"acpid",
"dmidecode",
"syslog-ng",
"cronie",
"dhcpcd",
"mlocate",
"xfsprogs",
"dosfstools",
"sudo",
"postfix",
"app-editors/vim",
"parted",
"portage-utils",
"bash-completion",
"gentoo-bashcomp",
"tmux",
"app-misc/screen",
"dev-vcs/git",
"net-misc/curl",
"usbutils",
"pciutils",
"logrotate",
"gptfdisk",
"sys-block/gpart",
"net-misc/ntp",
"net-fs/nfs-utils",
"linux-firmware"
],
"additional": [],
"oneshots": [
"portage"
],
"singles": [
"app-portage/eix",
"dev-util/cmake"
],
"keepgoing": [
"openssh"
],
"bootloader": [
"grub:2"
],
"kernel": [
"sys-kernel/genkernel",
"gentoo-sources",
"gentoolkit"
]
},
"services": {
"syslog-ng": "default",
"cronie": "default",
"acpid": "default",
"ntp": "default"
},
"iso": null,
"portage": null,
"stage3": null,
"disk": null,
"partition": 1
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,17 @@
{
"inherit": "base.json",
"packages": {
"additional": [
"app-emulation/cloud-init",
"sys-block/open-iscsi",
"app-editors/mg",
"net-analyzer/openbsd-netcat"
]
},
"disk": "/dev/vda",
"kernel": {
"path": "/etc/kernels/config.d/gentooimgr-cloud.config",
"config": "cloud.config"
},
"partition": 1
}

View File

@ -0,0 +1,195 @@
"""Module to handle downloading and verification of Gentoo images
To ensure accuracy, we re-download every .txt file if it's older than one day.
We assume that people building a cloud configured image want what is most up to date.
If you have a specific image you want built over and over regardless, create a config
file and load it in using -c/--config that points GENTOO_* values to the files you want.
"""
import os
import re
import sys
from datetime import date
import hashlib
import progressbar
from urllib.request import urlretrieve
import tempfile
from gentooimgr import LOG
import gentooimgr.config as config
from gentooimgr.common import older_than_a_day
hashpattern = re.compile(config.GENTOO_FILE_HASH_RE, re.MULTILINE)
isopattern = re.compile(config.GENTOO_FILE_ISO_RE, re.MULTILINE)
isohashpattern = re.compile(config.GENTOO_FILE_ISO_HASH_RE, re.MULTILINE)
stage3pattern = re.compile(config.GENTOO_FILE_STAGE3_RE, re.MULTILINE)
stage3hashpattern = re.compile(config.GENTOO_FILE_STAGE3_HASH_RE, re.MULTILINE)
class DownloadProgressBar():
    """Progress callback ("reporthook") for urllib.request.urlretrieve.

    Lazily creates a progressbar.ProgressBar on the first reported chunk
    (when the total size is first known) and finishes it at completion.
    """
    def __init__(self):
        # Created on first __call__. NOTE(review): uses the legacy `maxval`
        # keyword — confirm the installed progressbar package accepts it
        # (progressbar2 renamed it to max_value).
        self.progress = None

    def __call__(self, block_num, block_size, total_size):
        # urlretrieve invokes this as (block index, block size, total bytes).
        if not self.progress:
            self.progress = progressbar.ProgressBar(maxval=total_size)
            self.progress.start()
        downloaded = block_num * block_size
        if downloaded < total_size:
            self.progress.update(downloaded)
        else:
            self.progress.finish()
def parse_latest_iso_text(fullpath) -> tuple:
    """Returns a tuple of (hash type, iso name, iso bytes)"""
    with open(fullpath) as f:
        content = f.read()
    m_hash = hashpattern.search(content)
    m_iso = isopattern.search(content)
    # Each field is None when its pattern did not match.
    hashtype = m_hash.group(1) if m_hash is not None else None
    isoname = m_iso.group(1) if m_iso is not None else None
    isobytes = m_iso.group(2) if m_iso is not None else None
    return (hashtype, isoname, isobytes)
def parse_latest_stage3_text(fullpath) -> tuple:
    """Returns a tuple of (hash type, iso name, iso bytes)
    """
    with open(fullpath) as f:
        content = f.read()
    m_hash = hashpattern.search(content)
    m_stage3 = stage3pattern.search(content)
    # Each field is None when its pattern did not match.
    hashtype = m_hash.group(1) if m_hash is not None else None
    tarname = m_stage3.group(1) if m_stage3 is not None else None
    tarbytes = m_stage3.group(2) if m_stage3 is not None else None
    return (hashtype, tarname, tarbytes)
def verify(args, _type: str, baseurl: str, hashpattern, filename: str) -> bool:
    """Downloads hash file and run a hash check on the file
    :Parameters:
        - args: Namespace of parsed arguments (uses download_dir, redownload)
        - _type: str hash type as advertised in the latest .txt file, e.g. "sha256"
        - baseurl: (remote) folder where hashsum file is contained
        - hashpattern: compiled regex extracting the hash value from the hash file
        - filename: str name of file to check (used to download corresponding hash file)

    A install-amd64-minimal-20231112T170154Z.iso file will have a
    install-amd64-minimal-20231112T170154Z.iso.sha256 for example.

    :Returns:
        True when the digest matches; raises AssertionError on mismatch.
    """
    thefile = os.path.join(args.download_dir, filename)
    LOG.info(f"verifying hash of {thefile}")
    # Close the handle deterministically; the old code leaked it into
    # hashlib.file_digest (requires Python >= 3.11).
    with open(thefile, 'rb') as f:
        digest = hashlib.file_digest(f, _type.lower())
    filename = filename + f".{_type.lower()}"  # Update to hash file
    hashfile = os.path.join(baseurl, filename)
    fullpath = os.path.join(args.download_dir, os.path.basename(hashfile))
    if not os.path.exists(fullpath) or args.redownload or older_than_a_day(fullpath):
        LOG.info(f"Downloading {hashfile}")  # was a "(unknown)" placeholder
        urlretrieve(hashfile, fullpath, DownloadProgressBar())
    hd = digest.hexdigest()
    with open(fullpath, 'r') as f:
        content = f.read()
    m_hash = hashpattern.search(content)
    _hash = m_hash.group(1)
    assert hd == _hash, f"Hash mismatch {hd} != {_hash}"
    return True
def download_stage3(args, url=None) -> str:
    """Download (if needed) the latest stage3 tarball and verify size + hash.

    :Parameters:
        - args: Namespace (uses download_dir, redownload, profile)
        - url: optional explicit URL to a latest-stage3 .txt index; defaults
          to the openrc or systemd index depending on args.profile
    :Returns:
        Full path to the downloaded stage3 tarball.
    """
    if url is None:
        if args.profile == "systemd":
            url = os.path.join(config.GENTOO_BASE_STAGE_SYSTEMD_URL, config.GENTOO_LATEST_STAGE_SYSTEMD_FILE)
        else:
            url = os.path.join(config.GENTOO_BASE_STAGE_OPENRC_URL, config.GENTOO_LATEST_STAGE_OPENRC_FILE)
    # First fetch the small .txt index naming the current tarball.
    filename = os.path.basename(url)
    fullpath = os.path.join(args.download_dir, filename)
    if not os.path.exists(fullpath) or args.redownload or older_than_a_day(fullpath):
        LOG.info(f"Downloading {url}")  # was print() with a "(unknown)" placeholder
        urlretrieve(url, fullpath, DownloadProgressBar())
    hashtype, latest, size = parse_latest_stage3_text(fullpath)
    # Fail clearly instead of int(None)/os.path.join(None) further down.
    assert latest, f"Could not parse a stage3 filename from {fullpath}"
    size = int(size)
    # Then fetch the tarball itself, unless it is already present.
    filename = latest
    fullpath = os.path.join(args.download_dir, filename)
    if not os.path.exists(fullpath) or args.redownload:
        url = os.path.join(
            config.GENTOO_BASE_STAGE_SYSTEMD_URL if args.profile == "systemd" else \
            config.GENTOO_BASE_STAGE_OPENRC_URL,
            filename)
        LOG.info(f"Downloading {url}")
        urlretrieve(url, fullpath, DownloadProgressBar())
    # Verify byte size; the expected value comes from the .txt index.
    stage3size = os.path.getsize(fullpath)
    assert size == stage3size, f"Stage 3 size {stage3size} does not match expected value {size}."
    verify(args, hashtype, config.GENTOO_BASE_STAGE_SYSTEMD_URL if args.profile == "systemd" else \
           config.GENTOO_BASE_STAGE_OPENRC_URL, stage3hashpattern, filename)
    return fullpath
def download_portage(args, url=None) -> str:
    """Handle downloading of portage system for installation into cloud image

    We always download the latest portage package and rename it to today's date.
    If using today's date to grab portage, sometimes depending on timezone, the
    package won't be available. If always using latest, worst case scenario is you
    have a portage package a day late.

    :Returns: full path to the (possibly cached) snapshot tarball.
    """
    if url is None:
        url = config.GENTOO_PORTAGE_FILE
    base = os.path.basename(url)  # Uses 'latest' filename
    today = date.today()
    # Write latest to today's date so we don't constantly redownload.
    # BUG FIX: "%d%d%d" produced unpadded, ambiguous stamps (2023-1-2 ->
    # "202312", colliding with Dec 2023); use a zero-padded YYYYMMDD stamp.
    filename = base.replace("latest", today.strftime("%Y%m%d"))
    fullpath = os.path.join(args.download_dir, filename)
    # Portage is always "latest" in this case, so definitely check if older than a day and redownload.
    if not os.path.exists(fullpath) or args.redownload or older_than_a_day(fullpath):
        LOG.info(f"Downloading {url} ({base})")  # was a "(unknown)" placeholder
        urlretrieve(url, fullpath, DownloadProgressBar())
    return fullpath
def download(args, url=None) -> str:
    """Download txt file with iso name and hash type
    :Parameters:
        - args: Namespace with parsed arguments
        - url: str or None. If None, will generate a url to the latest minimal install iso
    :Returns:
        Full path to the downloaded iso file

    Raises AssertionError if the iso byte size fails to match the expected value.
    """
    if url is None:
        url = os.path.join(config.GENTOO_BASE_ISO_URL, config.GENTOO_LATEST_ISO_FILE)
    # Download the latest txt file
    filename = os.path.basename(url)
    fullpath = os.path.join(args.download_dir, filename)
    if not os.path.exists(fullpath) or args.redownload or older_than_a_day(fullpath):
        LOG.info(f"Downloading {fullpath}")
        urlretrieve(url, fullpath, DownloadProgressBar())
    hashtype, latest, size = parse_latest_iso_text(fullpath)
    # Fail clearly instead of int(None)/os.path.join(None) further down.
    assert latest, f"Could not parse an iso filename from {fullpath}"
    size = int(size)
    # Download the iso file
    filename = latest
    fullpath = os.path.join(args.download_dir, filename)
    if not os.path.exists(fullpath) or args.redownload:
        url = os.path.join(config.GENTOO_BASE_ISO_URL, filename)
        LOG.info(f"Downloading {url}")  # was a "(unknown)" placeholder
        urlretrieve(url, fullpath, DownloadProgressBar())
    # Verify byte size; the expected value comes from the .txt index.
    isosize = os.path.getsize(fullpath)
    assert size == isosize, f"ISO size {isosize} does not match expected value {size}."
    verify(args, hashtype, config.GENTOO_BASE_ISO_URL, isohashpattern, filename)
    return fullpath

View File

@ -0,0 +1,359 @@
"""Configure a Gentoo guest with cloud image settings
This step keeps track of how far it's gotten, so re-running this command
will continue on if an error was to occur, unless --start-over flag is given.
"""
import os
import sys
import shutil
import configparser
from subprocess import Popen, PIPE
import logging
import traceback
import gentooimgr.config
import gentooimgr.configs
import gentooimgr.common
import gentooimgr.chroot
import gentooimgr.kernel
from gentooimgr import LOG
from gentooimgr import HERE
from gentooimgr.configs import *
FILES_DIR = os.path.join(HERE, "..")
def step1_diskprep(args, cfg):
    """Step 1: label cfg["disk"] msdos, create one primary partition spanning
    the disk (2048s-aligned), and mkfs.ext4 it.

    NOTE(review): exit codes and stderr of parted/partprobe/mkfs are captured
    and discarded — a failure here only surfaces at a later step.
    """
    LOG.info("\t:: Step 1: Disk Partitioning")
    # http://rainbow.chard.org/2013/01/30/how-to-align-partitions-for-best-performance-using-parted/
    # http://honglus.blogspot.com/2013/06/script-to-automatically-partition-new.html
    cmds = [
        ['parted', '-s', f'{cfg.get("disk")}', 'mklabel', 'msdos'],
        ['parted', '-s', f'{cfg.get("disk")}', 'mkpart', 'primary', '2048s', '100%'],
        ['partprobe'],
        # Device node is disk + partition number, e.g. /dev/sda1 — assumes a
        # scheme without a "p" separator (TODO confirm for nvme devices).
        ['mkfs.ext4', '-FF', f'{cfg.get("disk")}{cfg.get("partition", 1)}']
    ]
    for c in cmds:
        proc = Popen(c, stdout=PIPE, stderr=PIPE)
        stdout, stderr = proc.communicate()
    completestep(1, "diskprep")
def step2_mount(args, cfg):
    """Step 2: mount the root partition made in step 1 at cfg["mountpoint"]."""
    LOG.info(f'\t:: Step 2: Mounting {gentooimgr.config.GENTOO_MOUNT}')
    # Device is disk + partition number, matching the mkfs in step 1.
    proc = Popen(["mount", f'{cfg.get("disk")}{cfg.get("partition")}', cfg.get("mountpoint")])
    proc.communicate()
    completestep(2, "mount")
def step3_stage3(args, cfg):
    """Step 3: extract the stage3 tarball onto the mounted root, falling back
    to auto-detection in FILES_DIR when no path is configured."""
    LOG.info(f'\t:: Step 3: Stage3 Tarball')
    stage3 = cfg.get("stage3") or args.stage3  # FIXME: auto detect stage3 images in mountpoint and add here
    if not stage3:
        stage3 = gentooimgr.common.stage3_from_dir(FILES_DIR)
    # -p / --xattrs-include / --numeric-owner preserve permissions, xattrs and
    # raw uids/gids, per the Gentoo handbook extraction flags.
    proc = Popen(["tar", "xpf", os.path.abspath(stage3), "--xattrs-include='*.*'", "--numeric-owner", "-C",
                  f'{cfg.get("mountpoint")}'])
    proc.communicate()
    completestep(3, "stage3")
def step4_binds(args, cfg):
    """Step 4: bind-mount the host pseudo-filesystems into the new root."""
    LOG.info(f'\t:: Step 4: Binding Filesystems')
    gentooimgr.chroot.bind(verbose=False)
    completestep(4, "binds")
def step5_portage(args, cfg):
    """Step 5: unpack the portage snapshot into <mountpoint>/usr and pin
    known parallel-build-fragile packages to MAKEOPTS=-j1 via package.env."""
    LOG.info(f'\t:: Step 5: Portage')
    portage = cfg.get("portage") or args.portage
    if not portage:
        portage = gentooimgr.common.portage_from_dir(FILES_DIR)
    proc = Popen(["tar", "xpf", portage, "-C", f"{cfg.get('mountpoint')}/usr/"])
    proc.communicate()
    # Edit portage
    portage_env = os.path.join(cfg.get("mountpoint"), 'etc', 'portage', 'env')
    os.makedirs(portage_env, exist_ok=True)
    with open(os.path.join(portage_env, 'singlejob.conf'), 'w') as f:
        f.write('MAKEOPTS="-j1"\n')
    # Assign the single-job env to the packages that need it.
    env_path = os.path.join(cfg.get("mountpoint"), 'etc', 'portage', 'package.env')
    with open(env_path, 'w') as f:
        f.write("app-portage/eix singlejob.conf\ndev-util/maturin singlejob.conf\ndev-util/cmake singlejob.conf")
    completestep(5, "portage")
def step6_licenses(args, cfg):
    """Step 6: write package.license entries from cfg["licensefiles"].

    Each key is a filename under /etc/portage/package.license; each value is a
    list of license-acceptance lines written to that file.
    """
    LOG.info(f'\t:: Step 6: Licenses')
    license_path = os.path.join(cfg.get("mountpoint"), 'etc', 'portage', 'package.license')
    os.makedirs(license_path, exist_ok=True)
    for fname, licenses in cfg.get("licensefiles", {}).items():
        # The old code reused `f` as both the loop variable (filename) and the
        # file handle, shadowing the former; use distinct names.
        with open(os.path.join(license_path, fname), 'w') as fp:
            fp.write('\n'.join(licenses))
    completestep(6, "license")
def step7_repos(args, cfg):
    """Step 7: seed repos.conf/gentoo.conf from the portage template, then
    apply the {section: {key: value}} overrides from cfg["repos"].

    cfg["repos"] maps an absolute path inside the image to its overrides.
    """
    LOG.info(f'\t:: Step 7: Repo Configuration')
    repo_path = os.path.join(cfg.get("mountpoint"), 'etc', 'portage', 'repos.conf')
    os.makedirs(repo_path, exist_ok=True)
    # Copy from template
    repo_file = os.path.join(repo_path, 'gentoo.conf')
    shutil.copyfile(
        os.path.join(cfg.get("mountpoint"), 'usr', 'share', 'portage', 'config', 'repos.conf'),
        repo_file)
    cp = configparser.ConfigParser()
    # NOTE(review): one parser instance accumulates sections across repofiles;
    # harmless for a single entry, confirm if multiple repo files are used.
    for repofile, data in cfg.get("repos", {}).items():
        cp.read(cfg.get("mountpoint") + repofile)  # repofile should be absolute path, do not use os.path.join.
        for section, d in data.items():
            if section in cp:
                for key, val in d.items():
                    # Replace everything after the key with contents of value.
                    cp.set(section, key, val)
            else:
                sys.stderr.write(f"\tWW No section {section} in {repofile}\n")
        # Close the handle deterministically; cp.write(open(...)) leaked it.
        with open(cfg.get("mountpoint") + repofile, 'w') as fp:
            cp.write(fp)
    completestep(7, "repos")
def step8_resolv(args, cfg):
    """Step 8: copy the host resolv.conf into the chroot and stage the
    gentooimgr sources plus completed-step markers so the run can continue
    from inside the chroot."""
    LOG.info(f'\t:: Step 8: Resolv')
    # --dereference: resolv.conf is frequently a symlink to a resolver's
    # runtime file; copy the contents, not the link.
    proc = Popen(["cp", "--dereference", "/etc/resolv.conf", os.path.join(cfg.get("mountpoint"), 'etc')])
    proc.communicate()
    # Copy all step files and python module to new chroot
    os.system(f"cp /tmp/*.step {cfg.get('mountpoint')}/tmp")
    os.system(f"cp -r . {cfg.get('mountpoint')}/mnt/")
    completestep(8, "resolv")
def step9_sync(args, cfg):
    """Step 9: chroot into the new root (irreversible for this process), then
    run emerge --sync and a full @world update. All later steps run inside
    this chroot.

    NOTE(review): os.system("source /etc/profile") runs in a throwaway child
    shell and cannot change this process's environment — effectively a no-op;
    confirm whether PATH/env updates are actually required here.
    """
    LOG.info(f"\t:: Step 9: sync")
    LOG.info("\t\t:: Entering chroot")
    os.chroot(cfg.get("mountpoint"))
    os.chdir(os.sep)
    os.system("source /etc/profile")
    proc = Popen(["emerge", "--sync", "--quiet"])
    proc.communicate()
    LOG.info("\t\t:: Emerging base")
    proc = Popen(["emerge", "--update", "--deep", "--newuse", "--keep-going", "@world"])
    proc.communicate()
    completestep(9, "sync")
def step10_emerge_pkgs(args, cfg):
    """Step 10: emerge the configured package sets in order: oneshots,
    single-job builds, kernel toolchain, the keep-going set, then the combined
    base/additional/bootloader set with -j args.threads."""
    LOG.info(f"\t:: Step 10: emerge pkgs")
    packages = cfg.get("packages", {})
    for oneshot_up in packages.get("oneshots", []):
        # --oneshot: update without recording in the world file (e.g. portage itself).
        proc = Popen(["emerge", "--oneshot", "--update", oneshot_up])
        proc.communicate()
    for single in packages.get("singles", []):
        # Packages that are unreliable with parallel emerge jobs.
        proc = Popen(["emerge", "-j1", single])
        proc.communicate()
    LOG.info(f"KERNEL PACKAGES {packages.get('kernel')}")
    if packages.get("kernel", []):
        cmd = ["emerge", "-j", str(args.threads)] + packages.get("kernel", [])
        proc = Popen(cmd)
        proc.communicate()
    # --keep-going: continue past individual failures for this set.
    cmd = ["emerge", "-j", str(args.threads), "--keep-going"]
    cmd += packages.get("keepgoing", [])
    proc = Popen(cmd)
    proc.communicate()
    cmd = ["emerge", "-j", str(args.threads)]
    cmd += packages.get("base", [])
    cmd += packages.get("additional", [])
    cmd += packages.get("bootloader", [])
    LOG.info(cmd)
    proc = Popen(cmd)
    proc.communicate()
    completestep(10, "pkgs")
def step11_kernel(args, cfg):
    """Step 11: select the first installed kernel sources and build the
    kernel via gentooimgr.kernel.build_kernel.

    With --kernel-dist no cwd change into the source tree is needed.
    """
    # at this point, genkernel will be installed
    LOG.info(f"\t:: Step 11: kernel")
    proc = Popen(["eselect", "kernel", "set", "1"])
    proc.communicate()
    if not args.kernel_dist:
        os.chdir(args.kernel_dir)
    # Removed an unused local: `threads = str(gentooimgr.config.THREADS)` was
    # computed here but never referenced.
    gentooimgr.kernel.build_kernel(args, cfg)
    completestep(11, "kernel")
def step12_grub(args, cfg):
    """Step 12: install grub to the target disk, write /etc/default/grub from
    the bundled template, and generate grub.cfg. Exits on install failure."""
    LOG.info(f"\t:: Step 12: grub")  # log line was mislabeled "kernel"
    proc = Popen(["grub-install", cfg.get('disk')])
    proc.communicate()
    code = proc.returncode
    if code != 0:
        sys.stderr.write(f"Failed to install grub on {cfg.get('disk')}\n")
        sys.exit(code)
    with open("/etc/default/grub", 'w') as f:
        f.write(f"{gentooimgr.kernel.GRUB_CFG}")
    proc = Popen(["grub-mkconfig", "-o", "/boot/grub/grub.cfg"])
    proc.communicate()
    completestep(12, "grub")
def step13_serial(args, cfg):
    """Step 13: enable the serial console gettys by uncommenting the s0 and
    s1 entries in /etc/inittab."""
    LOG.info(f"\t:: Step 13: Serial")
    for console in ("s0", "s1"):
        os.system(f"sed -i 's/^#{console}:/{console}:/g' /etc/inittab")
    completestep(13, "serial")
def step14_services(args, cfg):
    """Step 14: enable boot services via systemctl (systemd profile) or
    rc-update (openrc).

    NOTE(review): the service list is hard-coded rather than read from
    cfg["services"] — confirm that is intentional.
    """
    LOG.info(f"\t:: Step 14: Services")
    for service in ["acpid", "syslog-ng", "cronie", "sshd", "cloud-init-local", "cloud-init", "cloud-config",
                    "cloud-final", "ntpd", "nfsclient"]:
        if args.profile == "systemd":
            proc = Popen(["systemctl", "enable", service])
        else:
            proc = Popen(["rc-update", "add", service, "default"])
        proc.communicate()
    completestep(14, "services")
def step15_ethnaming(args, cfg):
    """Step 15: placeholder for predictable-interface-name (eth0) handling;
    currently only records completion. (The marker name "networking" differs
    from the step name, but markers are keyed by number so this is cosmetic.)
    """
    LOG.info(f"\t:: Step 15: Eth Naming")
    completestep(15, "networking")
def step16_sysconfig(args, cfg):
    """Step 16: in-chroot system configuration — UTC timezone, en_US locales,
    zero swappiness, cloud-init modules/config/templates, dhcpcd hostname
    tweak, hostname script, eix cache, and removal of the temporary
    resolv.conf copied in step 8."""
    LOG.info(f"\t:: Step 16: Sysconfig")
    with open("/etc/timezone", "w") as f:
        f.write("UTC")
    proc = Popen(["emerge", "--config", "sys-libs/timezone-data"])
    proc.communicate()
    with open("/etc/locale.gen", "a") as f:
        f.write("en_US.UTF-8 UTF-8\nen_US ISO-8859-1\n")
    proc = Popen(["locale-gen"])
    proc.communicate()
    proc = Popen(["eselect", "locale", "set", "en_US.utf8"])
    proc.communicate()
    proc = Popen(["env-update"])
    proc.communicate()
    with open('/etc/sysctl.d/swappiness.conf', 'w') as f:
        f.write("vm.swappiness = 0\n")
    # Modules cloud images need loaded at boot (e.g. iscsi_tcp).
    modloadpath = os.path.join(os.sep, 'etc', 'modules-load.d')
    os.makedirs(modloadpath, exist_ok=True)
    with open(os.path.join(modloadpath, 'cloud-modules.conf'), 'w') as f:
        f.write('\n'.join(gentooimgr.config.CLOUD_MODULES))
    cloudcfg = os.path.join(os.sep, 'etc', 'cloud')
    if not os.path.exists(cloudcfg):
        os.makedirs(cloudcfg, exist_ok=True)
        os.makedirs(os.path.join(cloudcfg, 'templates'), exist_ok=True)
    # NOTE(review): `as cfg` shadows the cfg parameter for the rest of the
    # function; nothing below reads the parameter, but it deserves renaming.
    with open(os.path.join(cloudcfg, 'cloud.cfg'), 'w') as cfg:
        cfg.write(f"{CLOUD_YAML}")
    os.chmod(os.path.join(cloudcfg, "cloud.cfg"), 0o644)
    with open(os.path.join(cloudcfg, "templates", "hosts.gentoo.tmpl"), 'w') as tmpl:
        tmpl.write(f"{HOST_TMPL}")  # FIXME:
    os.chmod(os.path.join(cloudcfg, "templates", "hosts.gentoo.tmpl"), 0o644)
    # Keep dhcp from overriding the hostname/domain configured below.
    proc = Popen("sed -i 's/domain_name\,\ domain_search\,\ host_name/domain_search/g' /etc/dhcpcd.conf", shell=True)
    proc.communicate()
    hostname = os.path.join(os.sep, 'etc', 'conf.d', 'hostname')
    with open(hostname, 'w') as f:
        f.write(f"{HOSTNAME}\n")
    os.chmod(hostname, 0o644)
    proc = Popen(["eix-update"])
    proc.communicate()
    os.remove(os.path.join(os.sep, 'etc', 'resolv.conf'))
    completestep(16, "sysconfig")
def step17_fstab(args, cfg):
    """Step 17: append the root filesystem entry to /etc/fstab.

    BUG FIX: the entry previously named the whole disk (e.g. /dev/sda) as the
    root device; the root filesystem lives on the partition created in step 1
    (disk + partition number, e.g. /dev/sda1), matching the mount in step 2.
    """
    LOG.info(f"\t:: Step 17: fstab")
    with open(os.path.join(os.sep, 'etc', 'fstab'), 'a') as fstab:
        fstab.write(f"{cfg.get('disk')}{cfg.get('partition', 1)}\t/\text4\tdefaults,noatime\t0 1\n")
    completestep(17, "fstab")
def completestep(step, stepname, prefix='/tmp'):
    """Record that numbered step `step` finished by dropping a marker file
    <prefix>/<step>.step. `stepname` is informational only."""
    marker = os.path.join(prefix, f"{step}.step")
    with open(marker, 'w') as fp:
        fp.write("done.") # text in this file is not currently used.
def getlaststep(prefix='/tmp'):
    """Return the number of the first step that has NOT completed, i.e. one
    past the last contiguous <n>.step marker under `prefix`.

    BUG FIX: the marker check previously ignored `prefix` and looked for
    "<n>.step" in the current working directory.
    """
    i = 1
    while os.path.exists(os.path.join(prefix, f"{i}.step")):
        i += 1
    return i
def stepdone(step, prefix='/tmp'):
    """True when the marker file for numbered step `step` exists under `prefix`."""
    marker = os.path.join(prefix, f"{step}.step")
    return os.path.exists(marker)
def configure(args, config: dict):
    """Run all 17 provisioning steps in order, skipping any whose marker file
    already exists (see completestep/stepdone) so an interrupted run resumes
    where it stopped. Refuses to run outside a live-cd-like environment
    (mountpoint missing) unless --force is given.

    Beware: step 9 chroots this process, so steps 10-17 execute inside the
    new root (markers were copied there by step 8).
    """
    # Load configuration
    if not os.path.exists(gentooimgr.config.GENTOO_MOUNT):
        if not args.force:
            # We aren't in a gentoo live cd are we?
            sys.stderr.write("Your system doesn't look like a gentoo live cd, exiting for safety.\n"
                "If you want to continue, use --force option and re-run `python -m gentooimgr install` with your configuration\n")
            sys.exit(1)
        else:
            # Assume we are root as per live cd, otherwise user should run this as root as a secondary confirmation
            os.makedirs(gentooimgr.config.GENTOO_MOUNT)
    # disk prep
    cfg = config
    if not stepdone(1): step1_diskprep(args, cfg)
    # mount root
    if not stepdone(2): step2_mount(args, cfg)
    # extract stage
    if not stepdone(3): step3_stage3(args, cfg)
    # mount binds
    if not stepdone(4): step4_binds(args, cfg)
    # extract portage
    if not stepdone(5): step5_portage(args, cfg)
    # Set licenses
    if not stepdone(6): step6_licenses(args, cfg)
    # repos.conf
    if not stepdone(7): step7_repos(args, cfg)
    # portage env files and resolv.conf
    if not stepdone(8): step8_resolv(args, cfg)
    # emerge --sync
    if not stepdone(9): step9_sync(args, cfg)
    # bindist
    if not stepdone(10): step10_emerge_pkgs(args, cfg)
    # emerge packages
    # configure & emerge kernel (use cloud configuration too)
    if not stepdone(11): step11_kernel(args, cfg)
    # grub
    if not stepdone(12): step12_grub(args, cfg)
    # enable serial console
    if not stepdone(13): step13_serial(args, cfg)
    # services
    if not stepdone(14): step14_services(args, cfg)
    # eth0 naming
    # timezone
    if not stepdone(15): step15_ethnaming(args, cfg)
    # locale
    # set some sysctl things
    # set some dhcp things
    # hostname
    if not stepdone(16): step16_sysconfig(args, cfg)
    # fstab
    if not stepdone(17): step17_fstab(args, cfg)
    # copy cloud cfg?
    gentooimgr.chroot.unbind()
    # Finish install processes like emaint and eix-update and news read

View File

@ -0,0 +1,128 @@
import os
import sys
import re
import shutil
import datetime
from subprocess import Popen, PIPE
import time
import gentooimgr.configs
DEFAULT_KERNEL_CONFIG_PATH = os.path.join(os.sep, 'etc', 'kernel', 'default.config')
def kernel_conf_apply(args, config):
    """Install a complete kernel .config by copying it into place.

    The source file comes from the json config's ``kernel.config`` entry,
    defaulting to ``<config-name>.config`` inside the bundled CONFIG_DIR;
    the destination defaults to DEFAULT_KERNEL_CONFIG_PATH.
    """
    base, _ext = os.path.splitext(args.config)
    kernel_cfg = config.get("kernel", {})
    source = os.path.join(gentooimgr.configs.CONFIG_DIR,
                          kernel_cfg.get("config", f"{base}.config"))
    target = kernel_cfg.get("path", DEFAULT_KERNEL_CONFIG_PATH)
    if os.path.exists(target):
        # Replace any previously-installed config file.
        os.remove(target)
    else:
        # First install: make sure the destination directory exists.
        os.makedirs(os.path.dirname(target), exist_ok=True)
    shutil.copyfile(source, target)
def build_kernel(args, config):
    """Build the kernel with genkernel using the configured .config.

    Falls back to `make defconfig` when no explicit kernel config is named,
    then copies the chosen config into place and runs genkernel against it.
    """
    if config.get("kernel", {}).get("config") is None:
        kernel_default_config(args, config)
    os.chdir(args.kernel_dir)
    kernelpath = config.get("kernel", {}).get("path", DEFAULT_KERNEL_CONFIG_PATH)
    # Copy the chosen config into place before pointing genkernel at it.
    kernel_conf_apply(args, config)
    proc = Popen(['genkernel', f'--kernel-config={kernelpath}', '--save-config', '--no-menuconfig', 'all'])
    proc.communicate()
    kernel_save_config(args, config)
def kernel_default_config(args, config):
    """Generate the stock defconfig for the kernel tree in args.kernel_dir."""
    os.chdir(args.kernel_dir)
    process = Popen(["make", "defconfig"])
    process.communicate()
def kernel_save_config(args, config):
    """Save the current kernel .config via `make savedefconfig`.

    Runs in args.kernel_dir (changes the process working directory).
    Fix: the docstring was previously placed after the first statement,
    making it a dead string expression rather than the function docstring.
    """
    os.chdir(args.kernel_dir)
    proc = Popen(["make", "savedefconfig"])
    proc.communicate()
# Template for /etc/default/grub — presumably written out by the grub
# install step (step12); serial console enabled for headless/cloud use.
# TODO(review): confirm which step consumes this constant.
GRUB_CFG = """
# Copyright 1999-2015 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Header: /var/cvsroot/gentoo-x86/sys-boot/grub/files/grub.default-3,v 1.5 2015/03/25 01:58:00 floppym Exp $
#
# To populate all changes in this file you need to regenerate your
# grub configuration file afterwards:
# 'grub2-mkconfig -o /boot/grub/grub.cfg'
#
# See the grub info page for documentation on possible variables and
# their associated values.
GRUB_DISTRIBUTOR="Gentoo"
# Default menu entry
#GRUB_DEFAULT=0
# Boot the default entry this many seconds after the menu is displayed
#GRUB_TIMEOUT=5
#GRUB_TIMEOUT_STYLE=menu
# Append parameters to the linux kernel command line
# openrc only spits to the last console=tty
GRUB_CMDLINE_LINUX="net.ifnames=0 vga=791 console=tty0 console=ttyS0,115200"
#
# Examples:
#
# Boot with network interface renaming disabled
# GRUB_CMDLINE_LINUX="net.ifnames=0"
#
# Boot with systemd instead of sysvinit (openrc)
# GRUB_CMDLINE_LINUX="init=/usr/lib/systemd/systemd"
# Append parameters to the linux kernel command line for non-recovery entries
#GRUB_CMDLINE_LINUX_DEFAULT=""
# Uncomment to disable graphical terminal (grub-pc only)
GRUB_TERMINAL="serial console"
GRUB_SERIAL_COMMAND="serial --speed=115200"
#GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1"
# The resolution used on graphical terminal.
# Note that you can use only modes which your graphic card supports via VBE.
# You can see them in real GRUB with the command `vbeinfo'.
#GRUB_GFXMODE=640x480
# Set to 'text' to force the Linux kernel to boot in normal text
# mode, 'keep' to preserve the graphics mode set using
# 'GRUB_GFXMODE', 'WIDTHxHEIGHT'['xDEPTH'] to set a particular
# graphics mode, or a sequence of these separated by commas or
# semicolons to try several modes in sequence.
#GRUB_GFXPAYLOAD_LINUX=
# Path to theme spec txt file.
# The starfield is by default provided with use truetype.
# NOTE: when enabling custom theme, ensure you have required font/etc.
#GRUB_THEME="/boot/grub/themes/starfield/theme.txt"
# Background image used on graphical terminal.
# Can be in various bitmap formats.
#GRUB_BACKGROUND="/boot/grub/mybackground.png"
# Uncomment if you don't want GRUB to pass "root=UUID=xxx" parameter to kernel
#GRUB_DISABLE_LINUX_UUID=true
# Uncomment to disable generation of recovery mode menu entries
#GRUB_DISABLE_RECOVERY=true
# Uncomment to disable generation of the submenu and put all choices on
# the top-level menu.
# Besides the visual affect of no sub menu, this makes navigation of the
# menu easier for a user who can't see the screen.
GRUB_DISABLE_SUBMENU=y
# Uncomment to play a tone when the main menu is displayed.
# This is useful, for example, to allow users who can't see the screen
# to know when they can make a choice on the menu.
#GRUB_INIT_TUNE="60 800 1"
"""

View File

@ -0,0 +1,101 @@
"""Qemu commands to run and handle the image"""
import os
import sys
import argparse
from subprocess import Popen, PIPE
from gentooimgr import LOG
import gentooimgr.config
import gentooimgr.common
def create_image(args, config: dict, overwrite: bool = False) -> str:
    """Creates an image (.img) file using qemu that will be used to create the cloud image

    :Parameters:
        - config: dictionary/json configuration containing required information
        - overwrite: if True, run_image() will call this and re-create.

    :Returns:
        Full path to image file produced by qemu

    Fix: the qemu-img exit status and stderr were previously discarded, so a
    failed create silently returned a path to a file that does not exist.
    """
    image = gentooimgr.common.get_image_name(args, config)
    name, ext = os.path.splitext(image)
    if os.path.exists(image) and not overwrite:
        return os.path.abspath(image)
    # qemu-img's format argument is the extension without its leading dot.
    cmd = ['qemu-img', 'create', '-f', ext[1:], image, str(config.get("imgsize", "12G"))]
    proc = Popen(cmd, stderr=PIPE, stdout=PIPE)
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        # Surface the failure instead of silently returning a missing path.
        LOG.error(f"qemu-img create failed ({proc.returncode}): {stderr}")
    return os.path.abspath(image)
def run_image(
        args: argparse.Namespace,
        config: dict,
        mounts=None):
    """Handle mounts and run the live cd image

    :Parameters:
        - args: parsed cli namespace; ``args.mounts`` is appended to mounts.
        - config: json configuration (iso, memory, imagename, temporary_dir).
        - mounts: list of iso paths to mount in qemu as disks.

    Fix: ``mounts`` previously defaulted to a shared mutable list (``[]``)
    that this function mutates with ``extend()``, so mounted isos leaked
    between successive calls. A private copy is now taken.
    """
    mounts = list(mounts) if mounts else []
    iso = config.get("iso")
    prefix = args.temporary_dir
    LOG.info(f"iso from config {iso}")
    if iso is None:
        # Fall back to an iso located next to this package...
        iso = gentooimgr.common.find_iso(
            os.path.join(
                os.path.abspath(os.path.dirname(__file__)),
                ".."
            )
        )
        LOG.info(f"iso from cwd {iso}")
        if not iso:
            # ...then to the configured temporary directory.
            prefix = config.get('temporary_dir')
            iso = gentooimgr.common.find_iso(prefix)
            LOG.info(f"iso from {prefix} {iso}")
    assert iso, f"iso not found {iso}"
    if isinstance(iso, list):
        # find_iso may return several candidates; take the first.
        assert len(iso), f"iso list is empty {iso}"
        iso = iso[0]
    image = gentooimgr.common.get_image_name(args, config)
    qmounts = []
    mounts.extend(args.mounts)
    for i in mounts:
        qmounts.append("-drive")
        qmounts.append(f"file={i},media=cdrom")
    assert image, f"image is empty {image}"
    if not os.path.exists(image):
        # Try the temporary directory when the image isn't in the cwd.
        if os.path.exists(os.path.join(prefix, image)):
            image = os.path.join(prefix, image)
    assert os.path.exists(image), f"image not found {image}"
    threads = args.threads
    cmd = [
        "qemu-system-x86_64",
        "-enable-kvm",
        "-boot", "d",
        "-m", str(config.get("memory", 2048)),
        "-smp", str(threads),
        "-drive", f"file={image},if=virtio,index=0",
        "-cdrom", iso,
        "-net", "nic,model=virtio",
        "-vga", "virtio",
        "-cpu", "kvm64",
        # serial0 is logged to gentoo.log so the install console is captured
        "-chardev", "file,id=charserial0,path=gentoo.log",
        "-device", "isa-serial,chardev=charserial0,id=serial0",
        "-chardev", "pty,id=charserial1",
        "-device", "isa-serial,chardev=charserial1,id=serial1"
    ]
    # "-net", "user",
    # -net user: network backend 'user' is not compiled into this binary"
    cmd += qmounts
    LOG.info(cmd)
    proc = Popen(cmd, stderr=PIPE, stdout=PIPE)
    stdout, stderr = proc.communicate()
    if stderr:
        LOG.error(str(stderr))

View File

@ -0,0 +1,37 @@
import os
from gentooimgr import LOG
import gentooimgr.config
import gentooimgr.qemu
import gentooimgr.common
def run(args, config: dict) -> None:
    """Package gentooimgr into an iso, select the disk image and boot qemu.

    Fixes:
      - ``gentooimgr.qemu.create_image()`` requires ``(args, config)`` but
        was called with no arguments, raising TypeError whenever neither
        config["imagename"] nor args.image was set.
      - ``LOG.warn`` is a deprecated alias of ``LOG.warning``.
      - removed the unused local ``mounts = args.mounts``.
    """
    LOG.info(": run")
    # Specified image or look for gentoo.{img,qcow2}; create one when absent.
    image = config.get("imagename") or args.image or gentooimgr.qemu.create_image(args, config)
    # We need to package up our gentooimgr package into an iso and mount it to the running image
    # Why? basic gentoo livecd has no git and no pip installer. We want install to be simple
    # and use the same common codebase.
    # This will require a couple mount commands to function though.
    main_iso = gentooimgr.common.make_iso_from_dir(os.path.join(
        os.path.abspath(os.path.dirname(__file__)),
        ".."
    ))
    assert os.path.isfile(main_iso), f"iso not found {main_iso}"
    LOG.info(args)
    LOG.info(f'iso={args.iso}')
    if args.iso != config['iso']:
        # Command-line iso overrides the configured one; warn about it.
        LOG.warning(f'iso={args.iso}')
        config['iso'] = args.iso
    else:
        LOG.info(f'iso={args.iso}')
    gentooimgr.qemu.run_image(
        args,
        config,
        # Add our generated mount and livecd (assumed)
        mounts=[main_iso]
    )
    LOG.info("done")

View File

@ -0,0 +1,15 @@
import os
import datetime
from subprocess import PIPE, Popen
def shrink(args, config, stamp=None):
    """Write a compressed, sparsified copy of the image with a date-stamped name.

    Returns the new file name, e.g. ``gentoo-2023-01-05.qcow2``.
    """
    if stamp is None:
        # 0 padded month and day timestamp (UTC), e.g. 2023-01-05
        stamp = datetime.datetime.utcnow().strftime("%Y-%m-%d")
    base, ext = os.path.splitext(config.get("imagename") or args.img)
    # ext retains its leading dot
    target = f"{base}-{stamp}{ext}"
    Popen(["virt-sparsify", "--compress", args.img, target]).communicate()
    return target

View File

@ -0,0 +1,54 @@
"""Step 1: Disk Partitioning
Step 2: Mounting {gentooimgr.config.GENTOO_MOUNT}
Step 3: Stage3 Tarball
Step 4: Binding Filesystems
Step 5: Portage
Step 6: Licenses
Step 7: Repo Configuration
Step 8: Resolv
Step 9: sync
Step 10: emerge pkgs
Step 11: kernel
Step 12: grub
Step 13: Serial
Step 14: Services
Step 15: Eth Naming
Step 16: Sysconfig
Step 17: fstab
"""
import os
import sys
import json
# from gentooimgr import LOG
import gentooimgr.config
import gentooimgr.configs
from gentooimgr import install
def print_template(args, configjson, prefix='/tmp'):
    """Print the step list (module docstring), progress and configuration.

    Reports the last completed install step (read from the ``<n>.step``
    markers under ``prefix``) on both stderr and stdout, then dumps the
    parsed json configuration.
    """
    print(__doc__)
    sys.stderr.write(f"the last step to succeed is {install.getlaststep(prefix)}\n")
    print(f"the last step to succeed is {install.getlaststep(prefix)}\n")
    print(f"""------------------------ STATUS ------------------------
CPU_THREADS = {args.threads or 1}
TEMPORARY_DIRECTORY = {args.temporary_dir}
PROFILE = {args.profile}
""")
    print(f"CONFIG {args.config}")
    print(json.dumps(configjson, sort_keys=True, indent=4))
    # Disabled: recursive dump of inherited configs and package listing.
    # inherit = configjson.get("inherit")
    # if inherit:
    #     print(f"CONFIG {inherit}")
    #     j = gentooimgr.config.load_default_config(inherit)
    #     if not j:
    #         j = gentooimgr.config.load_config(inherit)
    #
    #     print(json.dumps(j, sort_keys=True, indent=4))
    # print(f"""------------------------ PACKAGES ------------------------""")
    # for k, v in configjson.get("packages").items():
    #     print(k.upper())
    #     print("\t" + '\n\t'.join(v))
    #     print()

View File

@ -0,0 +1,189 @@
#!/usr/bin/python
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
IMPORT_ERR = None
try:
# import _argon2_xffi_bindings
import pykeepass as keepass
except ImportError:
IMPORT_ERR = traceback.format_exc()
# Ansible-doc metadata. Fix: the `group` option used the misspelled key
# "decription", which is invalid in the documentation schema.
DOCUMENTATION = r'''
---
module: ansible-keepassxc

short_description: Module to read credentials from KeePassXC

version_added: "0.0.1"

description: Module to read credentials from KeePassXC

options:
    database:
        description: Path to database file
        required: true
        type: str
    password:
        description: Database Password
        required: true
        type: str
    keyfile:
        description: Path to key file
        required: false
        type: str
    entry:
        description: Entry name for the attribute to fetch
        required: true
        type: str
    group:
        description: Group name that the Entry belongs to
        required: false
        type: str

author:
    - Jeremy Lumley (@jlumley)
'''
EXAMPLES = r'''
# Fetch the credentials for the server_1 entry in any group
- name: Fetch server_1 credentials
jlumley.jlumley.ansible-keepassxc:
database: "/secrets/db.kdbx"
password: "s3cure_p4550rd"
entry: "server_1"
# Fetch the reddit entry in the social group
- name: Fetching reddit credentials
jlumley.jlumley.ansible-keepassxc:
database: "/secrets/db.kdbx"
password: "sup3r_s3cure_p4550rd"
entry: "reddit"
group: "social"
# Fetch a custom strig attribute from the github entry
- name: Fetch Github API Token
jlumley.jlumley.ansible-keepassxc:
database: "/secrets/db.kdbx"
password: "d0pe_s3cure_p4550rd"
keyfile: "/secrets/top_secret_key"
entry: "github"
group: "development"
'''
RETURN = r'''
# Return values
username:
description: Username of entry if present
type: str
returned: always
sample: 's3cr3t_us3r'
password:
description: Password of entry if present
type: str
returned: always
sample: 's3cr3t_p455word'
url:
description: Url of entry if present
type: str
returned: always
sample: 'http://reddit.com'
custom_fields:
description: dictionary containing all custom fields
type: dict
returned: always
sample: False
no_log:
description: suppress logging of password
type: bool
returned: never
sample: False
'''
def run_module():
    """Open a KeePassXC database, look up a single entry and return its fields."""
    # define available arguments/parameters a user can pass to the module
    module_args = dict(
        database = dict(type='str', required=True),
        # Falls back to the ANSIBLE_KEEPASSXC_PASSWORD environment variable
        # (read once, when the module file is loaded).
        password = dict(type='str', required=False,
            default=os.environ.get('ANSIBLE_KEEPASSXC_PASSWORD')),
        keyfile = dict(type='str', required=False, default=None),
        entry = dict(type='str', required=True),
        group = dict(type='str', required=False),
        no_log = dict(type='bool', required=False, default=False),
    )
    # seed the result dict in the object
    result = dict(
        changed=False,
        username='',
        password='',
        url='',
        custom_fields={}
    )
    # Currently no support for a check_mode this maybe added later if
    # functionality to modify the database is added later
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=False,
    )
    # pykeepass failed to import at load time: report the missing library.
    if IMPORT_ERR:
        module.fail_json(
            msg=missing_required_lib("pykeepass"),
            exception=IMPORT_ERR
        )
    # unlock local keepass database
    try:
        kp = keepass.PyKeePass(
            module.params['database'],
            password=module.params['password'],
            keyfile=module.params['keyfile'])
    except keepass.exceptions.CredentialsError:
        module.fail_json(msg='Invalid Credentials')
    # find entry
    # NOTE(review): group is passed as the raw string parameter — confirm
    # pykeepass.find_entries accepts a group name here, not a Group object.
    entry = kp.find_entries(
        title=module.params['entry'],
        group=module.params['group']
    )
    # fail if the entry is not present
    if not entry:
        module.fail_json(msg=f"Unable to find entry: {module.params['entry']}")
    else:
        # find_entries returns a list of matches; use the first one.
        entry = entry[0]
    # Collect the entry's non-reserved custom string fields.
    custom_field_keys = entry._get_string_field_keys(exclude_reserved=True)
    custom_fields = dict()
    for key in custom_field_keys:
        custom_fields[key] = entry.get_custom_property(key)
    # Read-only lookup: never reports a change.
    result = dict (
        changed=False,
        username=entry.username,
        password=entry.password,
        url=entry.url,
        custom_fields=custom_fields
    )
    # in the event of a successful module execution, you will want to
    # simple AnsibleModule.exit_json(), passing the key/value results
    module.exit_json(**result)
def main():
    # Thin entry point so the module can be imported without executing.
    run_module()

if __name__ == '__main__':
    main()

View File

@ -0,0 +1,189 @@
#!/usr/bin/python
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
IMPORT_ERR = None
try:
# import _argon2_xffi_bindings
import pykeepass as keepass
except ImportError:
IMPORT_ERR = traceback.format_exc()
# Ansible-doc metadata. Fix: the `group` option used the misspelled key
# "decription", which is invalid in the documentation schema.
DOCUMENTATION = r'''
---
module: ansible-keepassxc

short_description: Module to read credentials from KeePassXC

version_added: "0.0.1"

description: Module to read credentials from KeePassXC

options:
    database:
        description: Path to database file
        required: true
        type: str
    password:
        description: Database Password
        required: true
        type: str
    keyfile:
        description: Path to key file
        required: false
        type: str
    entry:
        description: Entry name for the attribute to fetch
        required: true
        type: str
    group:
        description: Group name that the Entry belongs to
        required: false
        type: str

author:
    - Jeremy Lumley (@jlumley)
'''
EXAMPLES = r'''
# Fetch the credentials for the server_1 entry in any group
- name: Fetch server_1 credentials
jlumley.jlumley.ansible-keepassxc:
database: "/secrets/db.kdbx"
password: "s3cure_p4550rd"
entry: "server_1"
# Fetch the reddit entry in the social group
- name: Fetching reddit credentials
jlumley.jlumley.ansible-keepassxc:
database: "/secrets/db.kdbx"
password: "sup3r_s3cure_p4550rd"
entry: "reddit"
group: "social"
# Fetch a custom strig attribute from the github entry
- name: Fetch Github API Token
jlumley.jlumley.ansible-keepassxc:
database: "/secrets/db.kdbx"
password: "d0pe_s3cure_p4550rd"
keyfile: "/secrets/top_secret_key"
entry: "github"
group: "development"
'''
RETURN = r'''
# Return values
username:
description: Username of entry if present
type: str
returned: always
sample: 's3cr3t_us3r'
password:
description: Password of entry if present
type: str
returned: always
sample: 's3cr3t_p455word'
url:
description: Url of entry if present
type: str
returned: always
sample: 'http://reddit.com'
custom_fields:
description: dictionary containing all custom fields
type: dict
returned: always
sample: False
no_log:
description: suppress logging of password
type: bool
returned: never
sample: False
'''
def run_module():
    """Open a KeePassXC database, look up a single entry and return its fields."""
    # define available arguments/parameters a user can pass to the module
    module_args = dict(
        database = dict(type='str', required=True),
        # Falls back to the ANSIBLE_KEEPASSXC_PASSWORD environment variable
        # (read once, when the module file is loaded).
        password = dict(type='str', required=False,
            default=os.environ.get('ANSIBLE_KEEPASSXC_PASSWORD')),
        keyfile = dict(type='str', required=False, default=None),
        entry = dict(type='str', required=True),
        group = dict(type='str', required=False),
        no_log = dict(type='bool', required=False, default=False),
    )
    # seed the result dict in the object
    result = dict(
        changed=False,
        username='',
        password='',
        url='',
        custom_fields={}
    )
    # Currently no support for a check_mode this maybe added later if
    # functionality to modify the database is added later
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=False,
    )
    # pykeepass failed to import at load time: report the missing library.
    if IMPORT_ERR:
        module.fail_json(
            msg=missing_required_lib("pykeepass"),
            exception=IMPORT_ERR
        )
    # unlock local keepass database
    try:
        kp = keepass.PyKeePass(
            module.params['database'],
            password=module.params['password'],
            keyfile=module.params['keyfile'])
    except keepass.exceptions.CredentialsError:
        module.fail_json(msg='Invalid Credentials')
    # find entry
    # NOTE(review): group is passed as the raw string parameter — confirm
    # pykeepass.find_entries accepts a group name here, not a Group object.
    entry = kp.find_entries(
        title=module.params['entry'],
        group=module.params['group']
    )
    # fail if the entry is not present
    if not entry:
        module.fail_json(msg=f"Unable to find entry: {module.params['entry']}")
    else:
        # find_entries returns a list of matches; use the first one.
        entry = entry[0]
    # Collect the entry's non-reserved custom string fields.
    custom_field_keys = entry._get_string_field_keys(exclude_reserved=True)
    custom_fields = dict()
    for key in custom_field_keys:
        custom_fields[key] = entry.get_custom_property(key)
    # Read-only lookup: never reports a change.
    result = dict (
        changed=False,
        username=entry.username,
        password=entry.password,
        url=entry.url,
        custom_fields=custom_fields
    )
    # in the event of a successful module execution, you will want to
    # simple AnsibleModule.exit_json(), passing the key/value results
    module.exit_json(**result)
def main():
    # Thin entry point so the module can be imported without executing.
    run_module()

if __name__ == '__main__':
    main()

View File

@ -0,0 +1,226 @@
#!/usr/bin/python3
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
import logging
from argparse import Namespace
import pathlib
import traceback
# NOTE(review): hard-coded developer path — breaks on any other machine;
# TODO parameterize or drop in favour of the TESTF_ANSIBLE_SRC branch below.
sys.path.append('/mnt/o/var/local/src/play_tox/src/ansible_gentooimgr')
# in the library
if os.environ.get('TESTF_ANSIBLE_SRC',''):
    # running from source: make <src>/src/ansible_gentooimgr importable
    mod_path = os.environ.get('TESTF_ANSIBLE_SRC','')
    mod_path = os.path.join(mod_path, 'src', 'ansible_gentooimgr')
    assert os.path.isdir(mod_path), f"parent {mod_path}"
    assert os.path.isfile(os.path.join(mod_path, '__init__.py')),f"index {mod_path}"
    assert os.path.isdir(os.path.join(mod_path, 'gentooimgr')), f"sub {mod_path}"
    sys.path.append(mod_path)
try:
    import gentooimgr
except Exception as e:
    # NOTE(review): traceback.print_exc() prints to stderr and returns None,
    # so the f-string always embeds "None" — likely meant format_exc().
    sys.stderr.write(f"{e} {sys.path} {traceback.print_exc()}")
    raise
import ansible
DOCUMENTATION = rf'''
---
module: gentooimgr
short_description: Gentoo Image Builder for Cloud and Turnkey ISO installers
version_added: "1.0.0"
description:
* This project enables easy access to building ``systemd`` or ``openrc`` -based images.
* Performs automatic download AND verification of the linux iso, stage3 tarball and portage.
* Caches the iso and stage3 .txt files for at most a day before redownloading and rechecking for new files
* Sane and readable cli commands to build, run and test.
* Step system to enable user to continue off at the same place if a step fails
* No heavy packages like rust included ** TODO
options:
action:
description: The action to be run by the image builder
choices:
- build
- run
- status
- install
- chroot
- unchroot
- command
- shrink
- kernel
required: true
# clean test
config:
default: cloud.json
description: init configuration file or or base.json or cloud.json
required: false
loglevel:
default: {logging.INFO}
description: python logging level <= 50, INFO=20
required: false
threads:
default: 1
description: Number of threads to use
required: false
profile:
default: openrc
description: The init system
choices:
- openrc
- systemd
required: false
kernel_dir:
default: /usr/src/linux
description: Where kernel is specified. By default uses the active linux kernel
required: false
portage:
description: Extract the specified portage tarball onto the filesystem
required: false
stage3:
description: Extract the specified stage3 package onto the filesystema
required: false
action_args:
default: []
description: Arguments for some of the actions - UNUSED!
required: false
temporary_dir:
description: Path to temporary directory for downloading files (20G)
required: false
qcow:
description: Path to file to serve as the base image
required: false
# Specify this value according to your collection
# in format of namespace.collection.doc_fragment_name
# extends_documentation_fragment:
# - my_namespace.my_collection.my_doc_fragment_name
author:
- Your Name (@yourGitHubHandle)
'''
#[-y DAYS]
# [-d DOWNLOAD_DIR]
# [-f]
# [--format FORMAT]
EXAMPLES = r'''
# Pass in a message
- name: Test with a message
my_namespace.my_collection.my_test:
name: hello world
# pass in a message and have changed true
- name: Test with a message and changed output
my_namespace.my_collection.my_test:
name: hello world
new: true
# fail the module
- name: Test failure of the module
my_namespace.my_collection.my_test:
name: fail me
'''
RETURN = r'''
# These are examples of possible return values, and in general should use other names for return values.
message:
description: The output message that the test module generates.
type: str
returned: always
sample: 'goodbye'
'''
from ansible.module_utils.basic import AnsibleModule
def run_module():
    """Parse module arguments, dispatch to gentooimgr.__main__.main and
    report the outcome back to Ansible.

    Fixes:
      - ``dArgs['action']`` was referenced when deciding ``changed`` but
        ``dArgs`` was never defined (NameError); use ``module.params``.
      - ``traceback.print_exc()`` returns None, so the captured traceback
        was never appended to ``original_message``; use ``format_exc()``.
    """
    # define available arguments/parameters a user can pass to the module
    module_args = dict(
        action=dict(type='str', required=True),
        loglevel=dict(type='int', required=False, default=logging.INFO),
        threads=dict(type='int', required=False, default=1),
        config=dict(type='str', default='cloud.json', required=False),
        profile=dict(type='str', required=False),
        kernel_dir=dict(type='path', required=False),
        portage=dict(type='path', required=False),
        stage3=dict(type='path', required=False),
        temporary_dir=dict(type='path', required=False, default=pathlib.Path(os.getcwd())),
        download_dir=dict(type='path', required=False, default=pathlib.Path(os.getcwd())),
        qcow=dict(type='path', required=False),
    )
    # seed the result dict in the object
    # changed: whether this module effectively modified the target
    # message/original_message: data passed back for consumption
    result = dict(
        changed=False,
        original_message='',
        message=''
    )
    # the AnsibleModule object is our abstraction for working with Ansible
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True
    )
    # check mode: report current state without modifying the environment
    if module.check_mode:
        module.exit_json(**result)
    # Re-pack the parameters as an argparse-style namespace for gentooimgr.
    oargs = Namespace(**module.params)
    result['original_message'] = ""
    try:
        from gentooimgr.__main__ import main
        retval = main(oargs)
    except Exception as e:
        result['message'] = str(e)
        # format_exc() (not print_exc, which returns None) so the traceback
        # is actually recorded in the failure result.
        result['original_message'] += traceback.format_exc()
        module.fail_json(msg='Exception', **result)
    else:
        result['message'] = str(retval)
    # 'status' is read-only; every other action modifies the system.
    result['changed'] = module.params['action'] not in ['status']
    # in the event of a successful module execution, you will want to
    # simple AnsibleModule.exit_json(), passing the key/value results
    module.exit_json(**result)
def main():
    # Thin entry point so the module can be imported without executing.
    run_module()

if __name__ == '__main__':
    main()

View File

@ -0,0 +1,5 @@
pydantic
typing
# urllib is part of the Python standard library; the PyPI "urllib" package is not installable
progressbar
requests

View File

@ -0,0 +1,174 @@
# ---> Python
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
*.diff
*.good
*.bad
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
.pylint.err
.pylint.log
.pylint.out
*.dst
*~
.rsync.sh