aboutsummaryrefslogtreecommitdiff
path: root/ansible
diff options
context:
space:
mode:
Diffstat (limited to 'ansible')
-rw-r--r--ansible/ansible.cfg6
-rw-r--r--ansible/connection_plugins/lxc_ssh.py1259
-rw-r--r--ansible/group_vars/all/bird.yml3
-rw-r--r--ansible/group_vars/all/ipam.yml105
-rw-r--r--ansible/host_vars/danneri/systemd-networkd.yml2
-rw-r--r--ansible/host_vars/hash/roa-server.vault.yml19
-rw-r--r--ansible/host_vars/kjell-ct-102/traefik-server.yml0
-rw-r--r--ansible/host_vars/kjell-ct-102/users.yml5
-rw-r--r--ansible/host_vars/lhn2pi/all.yml2
-rw-r--r--ansible/host_vars/unifi/systemd-networkd.yml2
-rw-r--r--ansible/inventory59
-rw-r--r--ansible/netbox/pyproject.toml10
-rw-r--r--ansible/netbox/sync-unifi.py363
-rw-r--r--ansible/netbox/uv.lock129
-rw-r--r--ansible/plays/danneri.yml27
-rw-r--r--ansible/plays/files/zigbee2mqtt/garasjepi/configuration.yaml43
-rw-r--r--ansible/plays/host-garasjepi.yml28
-rw-r--r--ansible/plays/host-hash.yml10
-rw-r--r--ansible/plays/host-lhn2pi.yml6
-rw-r--r--ansible/plays/host-unifi.yml18
-rw-r--r--ansible/plays/ipam-generate-dns.yml34
-rw-r--r--ansible/plays/ipam-generate-tf.yml55
-rw-r--r--ansible/plays/kjell-ct-102.yml49
-rw-r--r--ansible/plays/otelcol-contrib.yml29
-rw-r--r--ansible/plays/roa-server.yml25
-rw-r--r--ansible/plays/templates/danneri/systemd-networkd/enp1s0.network8
-rw-r--r--ansible/plays/templates/lhn2pi/systemd/network/10-eth0.network11
-rw-r--r--ansible/plays/templates/otelcol-contrib/config.yaml65
-rw-r--r--ansible/plays/templates/roa-server/docker-compose.yml14
-rw-r--r--ansible/plays/templates/traefik-proxy.toml.j2126
-rw-r--r--ansible/plays/templates/unifi/systemd-networkd/enp1s0.network8
-rw-r--r--ansible/plays/zigbee2mqtt-backup.yml13
-rw-r--r--ansible/prometheus/deploy-config.yml19
-rw-r--r--ansible/prometheus/files/conflatorio/prometheus.yml25
-rw-r--r--ansible/roles/prometheus-bird-exporter/handlers/main.yml5
-rw-r--r--ansible/roles/prometheus-bird-exporter/tasks/main.yml18
-rw-r--r--ansible/roles/prometheus-node-exporter/handlers/main.yml5
-rw-r--r--ansible/roles/prometheus-node-exporter/tasks/main.yml18
-rw-r--r--ansible/roles/superusers/tasks/main.yml2
-rw-r--r--ansible/roles/systemd-networkd/handlers/main.yml5
-rw-r--r--ansible/roles/systemd-networkd/tasks/main.yml17
-rw-r--r--ansible/roles/traefik-server/handlers/main.yml5
-rw-r--r--ansible/roles/traefik-server/tasks/main.yml56
-rw-r--r--ansible/roles/traefik-server/templates/traefik.service.j252
-rw-r--r--ansible/roles/unifi/handlers/main.yml3
-rw-r--r--ansible/roles/unifi/tasks/main.yml23
-rw-r--r--ansible/unifi.yml6
-rw-r--r--ansible/wg0/files/coregonus/etc/wireguard/public-wg0.key1
-rw-r--r--ansible/wg0/group_vars/all/wireguard_wg0.yml7
-rw-r--r--ansible/wg0/wireguard-wg0-terraform.yml3
-rw-r--r--ansible/zigbee2mqtt.yml7
51 files changed, 1502 insertions, 1308 deletions
diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg
index 6da8010..2190846 100644
--- a/ansible/ansible.cfg
+++ b/ansible/ansible.cfg
@@ -1,6 +1,5 @@
[defaults]
become_method = sudo
-connection_plugins = ./connection_plugins
inventory = ./inventory
#,./inventory-terraform
nocows = True
@@ -8,3 +7,8 @@ stdout_callback = debug
vault_password_file = ./.vault-password
roles_path = roles:thirdparty
retry_files_enabled = False
+vars_plugins_enabled = host_group_vars,community.sops.sops
+
+[ssh_connection]
+pipelining = True
+ssh_args = -o ControlMaster=auto -o ControlPersist=1200
diff --git a/ansible/connection_plugins/lxc_ssh.py b/ansible/connection_plugins/lxc_ssh.py
deleted file mode 100644
index 2bb5352..0000000
--- a/ansible/connection_plugins/lxc_ssh.py
+++ /dev/null
@@ -1,1259 +0,0 @@
-# Copyright 2016 Pierre Chifflier <pollux@wzdftpd.net>
-#
-# SSH + lxc-attach connection module for Ansible 2.0
-#
-# Adapted from ansible/plugins/connection/ssh.py
-# Forked from https://github.com/chifflier/ansible-lxc-ssh
-# Hosted on https://github.com/andreasscherbaum/ansible-lxc-ssh
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-import errno
-import fcntl
-import hashlib
-import os
-import pipes
-import pty
-import shlex
-import subprocess
-import sys
-from distutils.version import LooseVersion
-
-from ansible.release import __version__ as ansible_version
-if LooseVersion(ansible_version) >= LooseVersion('2.3.0.0'):
- from functools import wraps
-from ansible import constants as C
-if LooseVersion(ansible_version) < LooseVersion('2.3.0.0'):
- from ansible.compat.six import text_type, binary_type
- from ansible.errors import AnsibleConnectionFailure, AnsibleError
- from ansible.plugins.connection import ConnectionBase
-if LooseVersion(ansible_version) >= LooseVersion('2.3.0.0'):
- from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
- from ansible.errors import AnsibleOptionsError
- from ansible.compat import selectors
- from ansible.module_utils.six import PY3, text_type, binary_type
- if LooseVersion(ansible_version) < LooseVersion('2.4.0.0'):
- from ansible.compat.six.moves import shlex_quote
- else:
- from ansible.module_utils.six.moves import shlex_quote
- from ansible.module_utils._text import to_bytes, to_native, to_text
- if LooseVersion(ansible_version) >= LooseVersion('2.4.0.0'):
- from ansible.module_utils.parsing.convert_bool import BOOLEANS, boolean
- from ansible.plugins.connection import ConnectionBase, BUFSIZE
-from ansible.utils.path import unfrackpath, makedirs_safe
-
-if LooseVersion(ansible_version) >= LooseVersion('2.2.0.0'):
- from ansible.module_utils._text import to_bytes, to_text as to_unicode, to_native as to_str
-else:
- from ansible.utils.unicode import to_bytes, to_unicode, to_str
-
-try:
- from __main__ import display
-except ImportError:
- from ansible.utils.display import Display
- display = Display()
-
-if LooseVersion(ansible_version) < LooseVersion('2.3.0.0'):
- import select
-
-
-# only used from Ansible version 2.3 on forward
-class AnsibleControlPersistBrokenPipeError(AnsibleError):
- ''' ControlPersist broken pipe '''
- pass
-
-
-def _ssh_retry(func):
- """
- Decorator to retry ssh/scp/sftp in the case of a connection failure
-
- Will retry if:
- * an exception is caught
- * ssh returns 255
- Will not retry if
- * remaining_tries is <2
- * retries limit reached
- """
- @wraps(func)
- def wrapped(self, *args, **kwargs):
- remaining_tries = int(C.ANSIBLE_SSH_RETRIES) + 1
- cmd_summary = "%s..." % args[0]
- for attempt in range(remaining_tries):
- cmd = args[0]
- if attempt != 0 and self._play_context.password and isinstance(cmd, list):
- # If this is a retry, the fd/pipe for sshpass is closed, and we need a new one
- self.sshpass_pipe = os.pipe()
- cmd[1] = b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')
-
- try:
- try:
- return_tuple = func(self, *args, **kwargs)
- display.vvv(return_tuple, host=self.host)
- # 0 = success
- # 1-254 = remote command return code
- # 255 = failure from the ssh command itself
- except (AnsibleControlPersistBrokenPipeError) as e:
- # Retry one more time because of the ControlPersist broken pipe (see #16731)
- display.vvv(u"RETRYING BECAUSE OF CONTROLPERSIST BROKEN PIPE")
- return_tuple = func(self, *args, **kwargs)
-
- if return_tuple[0] != 255:
- break
- else:
- raise AnsibleConnectionFailure("Failed to connect to the host via ssh: %s" % to_native(return_tuple[2]))
- except (AnsibleConnectionFailure, Exception) as e:
- if attempt == remaining_tries - 1:
- raise
- else:
- pause = 2 ** attempt - 1
- if pause > 30:
- pause = 30
-
- if isinstance(e, AnsibleConnectionFailure):
- msg = "ssh_retry: attempt: %d, ssh return code is 255. cmd (%s), pausing for %d seconds" % (attempt, cmd_summary, pause)
- else:
- msg = "ssh_retry: attempt: %d, caught exception(%s) from cmd (%s), pausing for %d seconds" % (attempt, e, cmd_summary, pause)
-
- display.vv(msg, host=self.host)
-
- time.sleep(pause)
- continue
-
- return return_tuple
- return wrapped
-
-
-class Connection(ConnectionBase):
- ''' ssh+lxc_attach connection '''
- transport = 'lxc_ssh'
-
- def __init__(self, play_context, new_stdin, *args, **kwargs):
- #print args
- #print kwargs
- super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
- self.host = self._play_context.remote_addr
- if LooseVersion(ansible_version) >= LooseVersion('2.3.0.0'):
- self.port = self._play_context.port
- self.user = self._play_context.remote_user
- self.control_path = C.ANSIBLE_SSH_CONTROL_PATH
- self.control_path_dir = C.ANSIBLE_SSH_CONTROL_PATH_DIR
- self.lxc_version = None
-
- # LXC v1 uses 'lxc-info', 'lxc-attach' and so on
- # LXC v2 uses just 'lxc'
- (returncode2, stdout2, stderr2) = self._exec_command("which lxc", None, False)
- (returncode1, stdout1, stderr1) = self._exec_command("which lxc-info", None, False)
- if (returncode2 == 0):
- self.lxc_version = 2
- display.vvv('LXC v2')
- elif (returncode1 == 0):
- self.lxc_version = 1
- display.vvv('LXC v1')
- else:
- raise AnsibleConnectionFailure('Cannot identify LXC version')
- sys.exit(1)
-
-
- # The connection is created by running ssh/scp/sftp from the exec_command,
- # put_file, and fetch_file methods, so we don't need to do any connection
- # management here.
- def _connect(self):
- ''' connect to the lxc; nothing to do here '''
- display.vvv('XXX connect')
- super(Connection, self)._connect()
- #self.container_name = self.ssh._play_context.remote_addr
- self.container_name = self._play_context.ssh_extra_args # XXX
- #self.container = None
-
-
- # only used from Ansible version 2.3 on forward
- @staticmethod
- def _create_control_path(host, port, user, connection=None):
- '''Make a hash for the controlpath based on con attributes'''
- pstring = '%s-%s-%s' % (host, port, user)
- if connection:
- pstring += '-%s' % connection
- m = hashlib.sha1()
- m.update(to_bytes(pstring))
- digest = m.hexdigest()
- cpath = '%(directory)s/' + digest[:10]
- return cpath
-
-
- @staticmethod
- def _persistence_controls(b_command):
- '''
- Takes a command array and scans it for ControlPersist and ControlPath
- settings and returns two booleans indicating whether either was found.
- This could be smarter, e.g. returning false if ControlPersist is 'no',
- but for now we do it simple way.
- '''
-
- controlpersist = False
- controlpath = False
-
- if LooseVersion(ansible_version) < LooseVersion('2.3.0.0'):
- for arg in b_command:
- if 'controlpersist' in arg.lower():
- controlpersist = True
- elif 'controlpath' in arg.lower():
- controlpath = True
- if LooseVersion(ansible_version) >= LooseVersion('2.3.0.0'):
- for b_arg in (a.lower() for a in b_command):
- if b'controlpersist' in b_arg:
- controlpersist = True
- elif b'controlpath' in b_arg:
- controlpath = True
-
- return controlpersist, controlpath
-
-
- @staticmethod
- def _split_args(argstring):
- """
- Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a
- list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to
- the argument list. The list will not contain any empty elements.
- """
- return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()]
-
-
- if LooseVersion(ansible_version) < LooseVersion('2.3.0.0'):
- def _add_args(self, explanation, args):
- """
- Adds the given args to self._command and displays a caller-supplied
- explanation of why they were added.
- """
- self._command += args
- display.vvvvv('SSH: ' + explanation + ': (%s)' % ')('.join(args), host=self._play_context.remote_addr)
-
-
- if LooseVersion(ansible_version) >= LooseVersion('2.3.0.0'):
- def _add_args(self, b_command, b_args, explanation):
- """
- Adds arguments to the ssh command and displays a caller-supplied explanation of why.
- :arg b_command: A list containing the command to add the new arguments to.
- This list will be modified by this method.
- :arg b_args: An iterable of new arguments to add. This iterable is used
- more than once so it must be persistent (ie: a list is okay but a
- StringIO would not)
- :arg explanation: A text string containing explaining why the arguments
- were added. It will be displayed with a high enough verbosity.
- .. note:: This function does its work via side-effect. The b_command list has the new arguments appended.
- """
- display.vvvvv(u'SSH: %s: (%s)' % (explanation, ')('.join(to_text(a) for a in b_args)), host=self._play_context.remote_addr)
- b_command += b_args
-
-
- if LooseVersion(ansible_version) < LooseVersion('2.3.0.0'):
- def _build_command(self, binary, *other_args):
- self._command = []
- self._command += [binary]
- self._command += ['-C']
- if self._play_context.verbosity > 3:
- self._command += ['-vvv']
- elif binary == 'ssh':
- # Older versions of ssh (e.g. in RHEL 6) don't accept sftp -q.
- self._command += ['-q']
- # Next, we add [ssh_connection]ssh_args from ansible.cfg.
- if self._play_context.ssh_args:
- args = self._split_args(self._play_context.ssh_args)
- self._add_args("ansible.cfg set ssh_args", args)
- # Now we add various arguments controlled by configuration file settings
- # (e.g. host_key_checking) or inventory variables (ansible_ssh_port) or
- # a combination thereof.
- if not C.HOST_KEY_CHECKING:
- self._add_args(
- "ANSIBLE_HOST_KEY_CHECKING/host_key_checking disabled",
- ("-o", "StrictHostKeyChecking=no")
- )
- if self._play_context.port is not None:
- self._add_args(
- "ANSIBLE_REMOTE_PORT/remote_port/ansible_port set",
- ("-o", "Port={0}".format(self._play_context.port))
- )
- key = self._play_context.private_key_file
- if key:
- self._add_args(
- "ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set",
- ("-o", "IdentityFile=\"{0}\"".format(os.path.expanduser(key)))
- )
- if not self._play_context.password:
- self._add_args(
- "ansible_password/ansible_ssh_pass not set", (
- "-o", "KbdInteractiveAuthentication=no",
- "-o", "PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
- "-o", "PasswordAuthentication=no"
- )
- )
- user = self._play_context.remote_user
- if user:
- self._add_args(
- "ANSIBLE_REMOTE_USER/remote_user/ansible_user/user/-u set",
- ("-o", "User={0}".format(to_bytes(self._play_context.remote_user)))
- )
- self._add_args(
- "ANSIBLE_TIMEOUT/timeout set",
- ("-o", "ConnectTimeout={0}".format(self._play_context.timeout))
- )
- # Check if ControlPersist is enabled and add a ControlPath if one hasn't
- # already been set.
- controlpersist, controlpath = self._persistence_controls(self._command)
- if controlpersist:
- self._persistent = True
- if not controlpath:
- cpdir = unfrackpath('$HOME/.ansible/cp')
- display.vv(str(C.ANSIBLE_SSH_CONTROL_PATH))
- # The directory must exist and be writable.
- makedirs_safe(cpdir, 0o700)
- if not os.access(cpdir, os.W_OK):
- raise AnsibleError("Cannot write to ControlPath %s" % cpdir)
- args = ("-o", "ControlPath={0}".format(
- to_bytes(C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=cpdir)))
- )
- self._add_args("found only ControlPersist; added ControlPath", args)
- ## Finally, we add any caller-supplied extras.
- if other_args:
- self._command += other_args
- return self._command
-
-
- if LooseVersion(ansible_version) >= LooseVersion('2.3.0.0'):
- def _build_command(self, binary, *other_args):
- b_command = []
- if binary == 'ssh':
- b_command += [to_bytes(self._play_context.ssh_executable, errors='surrogate_or_strict')]
- else:
- b_command += [to_bytes(binary, errors='surrogate_or_strict')]
- if self._play_context.verbosity > 3:
- b_command.append(b'-vvv')
- # Next, we add [ssh_connection]ssh_args from ansible.cfg.
- #
- if self._play_context.ssh_args:
- b_args = [to_bytes(a, errors='surrogate_or_strict') for a in
- self._split_args(self._play_context.ssh_args)]
- self._add_args(b_command, b_args, u"ansible.cfg set ssh_args")
-
- # Now we add various arguments controlled by configuration file settings
- # (e.g. host_key_checking) or inventory variables (ansible_ssh_port) or
- # a combination thereof.
- if not C.HOST_KEY_CHECKING:
- b_args = (b"-o", b"StrictHostKeyChecking=no")
- self._add_args(b_command, b_args, u"ANSIBLE_HOST_KEY_CHECKING/host_key_checking disabled")
- if self._play_context.port is not None:
- b_args = (b"-o", b"Port=" + to_bytes(self._play_context.port, nonstring='simplerepr', errors='surrogate_or_strict'))
- self._add_args(b_command, b_args, u"ANSIBLE_REMOTE_PORT/remote_port/ansible_port set")
- key = self._play_context.private_key_file
- if key:
- b_args = (b"-o", b'IdentityFile="' + to_bytes(os.path.expanduser(key), errors='surrogate_or_strict') + b'"')
- self._add_args(b_command, b_args, u"ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set")
- if not self._play_context.password:
- self._add_args(
- b_command, (
- b"-o", b"KbdInteractiveAuthentication=no",
- b"-o", b"PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
- b"-o", b"PasswordAuthentication=no"
- ),
- u"ansible_password/ansible_ssh_pass not set"
- )
- user = self._play_context.remote_user
- if user:
- self._add_args(
- b_command,
- (b"-o", b"User=" + to_bytes(self._play_context.remote_user, errors='surrogate_or_strict')),
- u"ANSIBLE_REMOTE_USER/remote_user/ansible_user/user/-u set"
- )
- self._add_args(
- b_command,
- (b"-o", b"ConnectTimeout=" + to_bytes(self._play_context.timeout, errors='surrogate_or_strict', nonstring='simplerepr')),
- u"ANSIBLE_TIMEOUT/timeout set"
- )
- # Check if ControlPersist is enabled and add a ControlPath if one hasn't
- # already been set.
- controlpersist, controlpath = self._persistence_controls(b_command)
- if controlpersist:
- self._persistent = True
- if not controlpath:
- cpdir = unfrackpath(self.control_path_dir)
- b_cpdir = to_bytes(cpdir, errors='surrogate_or_strict')
- # The directory must exist and be writable.
- makedirs_safe(b_cpdir, 0o700)
- if not os.access(b_cpdir, os.W_OK):
- raise AnsibleError("Cannot write to ControlPath %s" % to_native(cpdir))
-
- if not self.control_path:
- self.control_path = self._create_control_path(
- self.host,
- self.port,
- self.user
- )
- b_args = (b"-o", b"ControlPath=" + to_bytes(self.control_path % dict(directory=cpdir), errors='surrogate_or_strict'))
- self._add_args(b_command, b_args, u"found only ControlPersist; added ControlPath")
-
- # Finally, we add any caller-supplied extras.
- if other_args:
- b_command += [to_bytes(a) for a in other_args]
-
- return b_command
-
-
- def _send_initial_data(self, fh, in_data):
- '''
- Writes initial data to the stdin filehandle of the subprocess and closes
- it. (The handle must be closed; otherwise, for example, "sftp -b -" will
- just hang forever waiting for more commands.)
- '''
-
- display.debug('Sending initial data')
-
- try:
- if LooseVersion(ansible_version) < LooseVersion('2.3.0.0'):
- fh.write(in_data)
- if LooseVersion(ansible_version) >= LooseVersion('2.3.0.0'):
- fh.write(to_bytes(in_data))
- fh.close()
- except (OSError, IOError):
- raise AnsibleConnectionFailure('SSH Error: data could not be sent to remote host "%s". Make sure this host can be reached over ssh' % self.host)
-
- display.debug('Sent initial data (%d bytes)' % len(in_data))
-
-
- # Used by _run() to kill processes on failures
- @staticmethod
- def _terminate_process(p):
- """ Terminate a process, ignoring errors """
- try:
- p.terminate()
- except (OSError, IOError):
- pass
-
-
- if LooseVersion(ansible_version) < LooseVersion('2.3.0.0'):
- # This is separate from _run() because we need to do the same thing for stdout
- # and stderr.
- def _examine_output(self, source, state, chunk, sudoable):
- '''
- Takes a string, extracts complete lines from it, tests to see if they
- are a prompt, error message, etc., and sets appropriate flags in self.
- Prompt and success lines are removed.
-
- Returns the processed (i.e. possibly-edited) output and the unprocessed
- remainder (to be processed with the next chunk) as strings.
- '''
-
- output = []
- for l in chunk.splitlines(True):
- suppress_output = False
-
- #display.debug("Examining line (source=%s, state=%s): '%s'" % (source, state, l.rstrip('\r\n')))
- if self._play_context.prompt and self.check_password_prompt(l):
- display.debug("become_prompt: (source=%s, state=%s): '%s'" % (source, state, l.rstrip('\r\n')))
- self._flags['become_prompt'] = True
- suppress_output = True
- elif self._play_context.success_key and self.check_become_success(l):
- display.debug("become_success: (source=%s, state=%s): '%s'" % (source, state, l.rstrip('\r\n')))
- self._flags['become_success'] = True
- suppress_output = True
- elif sudoable and self.check_incorrect_password(l):
- display.debug("become_error: (source=%s, state=%s): '%s'" % (source, state, l.rstrip('\r\n')))
- self._flags['become_error'] = True
- elif sudoable and self.check_missing_password(l):
- display.debug("become_nopasswd_error: (source=%s, state=%s): '%s'" % (source, state, l.rstrip('\r\n')))
- self._flags['become_nopasswd_error'] = True
-
- if not suppress_output:
- output.append(l)
-
- # The chunk we read was most likely a series of complete lines, but just
- # in case the last line was incomplete (and not a prompt, which we would
- # have removed from the output), we retain it to be processed with the
- # next chunk.
-
- remainder = ''
- if output and not output[-1].endswith('\n'):
- remainder = output[-1]
- output = output[:-1]
-
- return ''.join(output), remainder
-
-
- if LooseVersion(ansible_version) >= LooseVersion('2.3.0.0'):
- # This is separate from _run() because we need to do the same thing for stdout
- # and stderr.
- def _examine_output(self, source, state, b_chunk, sudoable):
- '''
- Takes a string, extracts complete lines from it, tests to see if they
- are a prompt, error message, etc., and sets appropriate flags in self.
- Prompt and success lines are removed.
- Returns the processed (i.e. possibly-edited) output and the unprocessed
- remainder (to be processed with the next chunk) as strings.
- '''
-
- output = []
- for b_line in b_chunk.splitlines(True):
- display_line = to_text(b_line).rstrip('\r\n')
- suppress_output = False
-
- # display.debug("Examining line (source=%s, state=%s): '%s'" % (source, state, display_line))
- if self._play_context.prompt and self.check_password_prompt(b_line):
- display.debug("become_prompt: (source=%s, state=%s): '%s'" % (source, state, display_line))
- self._flags['become_prompt'] = True
- suppress_output = True
- elif self._play_context.success_key and self.check_become_success(b_line):
- display.debug("become_success: (source=%s, state=%s): '%s'" % (source, state, display_line))
- self._flags['become_success'] = True
- suppress_output = True
- elif sudoable and self.check_incorrect_password(b_line):
- display.debug("become_error: (source=%s, state=%s): '%s'" % (source, state, display_line))
- self._flags['become_error'] = True
- elif sudoable and self.check_missing_password(b_line):
- display.debug("become_nopasswd_error: (source=%s, state=%s): '%s'" % (source, state, display_line))
- self._flags['become_nopasswd_error'] = True
-
- if not suppress_output:
- output.append(b_line)
-
- # The chunk we read was most likely a series of complete lines, but just
- # in case the last line was incomplete (and not a prompt, which we would
- # have removed from the output), we retain it to be processed with the
- # next chunk.
-
- remainder = b''
- if output and not output[-1].endswith(b'\n'):
- remainder = output[-1]
- output = output[:-1]
-
- return b''.join(output), remainder
-
-
- # only used from Ansible version 2.3 on forward
- def _bare_run(self, cmd, in_data, sudoable=True, checkrc=True):
- '''
- Starts the command and communicates with it until it ends.
- '''
-
- display_cmd = list(map(shlex_quote, map(to_text, cmd)))
- display.vvv(u'SSH: EXEC {0}'.format(u' '.join(display_cmd)), host=self.host)
-
- # Start the given command. If we don't need to pipeline data, we can try
- # to use a pseudo-tty (ssh will have been invoked with -tt). If we are
- # pipelining data, or can't create a pty, we fall back to using plain
- # old pipes.
-
- p = None
-
- if isinstance(cmd, (text_type, binary_type)):
- cmd = to_bytes(cmd)
- else:
- cmd = map(to_bytes, cmd)
-
- if not in_data:
- try:
- # Make sure stdin is a proper pty to avoid tcgetattr errors
- master, slave = pty.openpty()
- if PY3 and self._play_context.password:
- p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE, pass_fds=self.sshpass_pipe)
- else:
- p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdin = os.fdopen(master, 'wb', 0)
- os.close(slave)
- except (OSError, IOError):
- p = None
-
- if not p:
- if PY3 and self._play_context.password:
- p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, pass_fds=self.sshpass_pipe)
- else:
- p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdin = p.stdin
-
- # If we are using SSH password authentication, write the password into
- # the pipe we opened in _build_command.
-
- if self._play_context.password:
- os.close(self.sshpass_pipe[0])
- try:
- os.write(self.sshpass_pipe[1], to_bytes(self._play_context.password) + b'\n')
- except OSError as e:
- # Ignore broken pipe errors if the sshpass process has exited.
- if e.errno != errno.EPIPE or p.poll() is None:
- raise
- os.close(self.sshpass_pipe[1])
-
- #
- # SSH state machine
- #
-
- # Now we read and accumulate output from the running process until it
- # exits. Depending on the circumstances, we may also need to write an
- # escalation password and/or pipelined input to the process.
-
- states = [
- 'awaiting_prompt', 'awaiting_escalation', 'ready_to_send', 'awaiting_exit'
- ]
-
- # Are we requesting privilege escalation? Right now, we may be invoked
- # to execute sftp/scp with sudoable=True, but we can request escalation
- # only when using ssh. Otherwise we can send initial data straightaway.
-
- state = states.index('ready_to_send')
- if b'ssh' in cmd:
- if self._play_context.prompt:
- # We're requesting escalation with a password, so we have to
- # wait for a password prompt.
- state = states.index('awaiting_prompt')
- display.debug(u'Initial state: %s: %s' % (states[state], self._play_context.prompt))
- elif self._play_context.become and self._play_context.success_key:
- # We're requesting escalation without a password, so we have to
- # detect success/failure before sending any initial data.
- state = states.index('awaiting_escalation')
- display.debug(u'Initial state: %s: %s' % (states[state], self._play_context.success_key))
-
- # We store accumulated stdout and stderr output from the process here,
- # but strip any privilege escalation prompt/confirmation lines first.
- # Output is accumulated into tmp_*, complete lines are extracted into
- # an array, then checked and removed or copied to stdout or stderr. We
- # set any flags based on examining the output in self._flags.
-
- b_stdout = b_stderr = b''
- b_tmp_stdout = b_tmp_stderr = b''
-
- self._flags = dict(
- become_prompt=False, become_success=False,
- become_error=False, become_nopasswd_error=False
- )
-
- # select timeout should be longer than the connect timeout, otherwise
- # they will race each other when we can't connect, and the connect
- # timeout usually fails
- timeout = 2 + self._play_context.timeout
- for fd in (p.stdout, p.stderr):
- fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
-
- # TODO: bcoca would like to use SelectSelector() when open
- # filehandles is low, then switch to more efficient ones when higher.
- # select is faster when filehandles is low.
- selector = selectors.DefaultSelector()
- selector.register(p.stdout, selectors.EVENT_READ)
- selector.register(p.stderr, selectors.EVENT_READ)
-
- # If we can send initial data without waiting for anything, we do so
- # before we start polling
- if states[state] == 'ready_to_send' and in_data:
- self._send_initial_data(stdin, in_data)
- state += 1
-
- try:
- while True:
- poll = p.poll()
- events = selector.select(timeout)
-
- # We pay attention to timeouts only while negotiating a prompt.
-
- if not events:
- # We timed out
- if state <= states.index('awaiting_escalation'):
- # If the process has already exited, then it's not really a
- # timeout; we'll let the normal error handling deal with it.
- if poll is not None:
- break
- self._terminate_process(p)
- raise AnsibleError('Timeout (%ds) waiting for privilege escalation prompt: %s' % (timeout, to_native(b_stdout)))
-
- # Read whatever output is available on stdout and stderr, and stop
- # listening to the pipe if it's been closed.
-
- for key, event in events:
- if key.fileobj == p.stdout:
- b_chunk = p.stdout.read()
- if b_chunk == b'':
- # stdout has been closed, stop watching it
- selector.unregister(p.stdout)
- # When ssh has ControlMaster (+ControlPath/Persist) enabled, the
- # first connection goes into the background and we never see EOF
- # on stderr. If we see EOF on stdout, lower the select timeout
- # to reduce the time wasted selecting on stderr if we observe
- # that the process has not yet existed after this EOF. Otherwise
- # we may spend a long timeout period waiting for an EOF that is
- # not going to arrive until the persisted connection closes.
- timeout = 1
- b_tmp_stdout += b_chunk
- display.debug("stdout chunk (state=%s):\n>>>%s<<<\n" % (state, to_text(b_chunk)))
- elif key.fileobj == p.stderr:
- b_chunk = p.stderr.read()
- if b_chunk == b'':
- # stderr has been closed, stop watching it
- selector.unregister(p.stderr)
- b_tmp_stderr += b_chunk
- display.debug("stderr chunk (state=%s):\n>>>%s<<<\n" % (state, to_text(b_chunk)))
-
- # We examine the output line-by-line until we have negotiated any
- # privilege escalation prompt and subsequent success/error message.
- # Afterwards, we can accumulate output without looking at it.
-
- if state < states.index('ready_to_send'):
- if b_tmp_stdout:
- b_output, b_unprocessed = self._examine_output('stdout', states[state], b_tmp_stdout, sudoable)
- b_stdout += b_output
- b_tmp_stdout = b_unprocessed
-
- if b_tmp_stderr:
- b_output, b_unprocessed = self._examine_output('stderr', states[state], b_tmp_stderr, sudoable)
- b_stderr += b_output
- b_tmp_stderr = b_unprocessed
- else:
- b_stdout += b_tmp_stdout
- b_stderr += b_tmp_stderr
- b_tmp_stdout = b_tmp_stderr = b''
-
- # If we see a privilege escalation prompt, we send the password.
- # (If we're expecting a prompt but the escalation succeeds, we
- # didn't need the password and can carry on regardless.)
-
- if states[state] == 'awaiting_prompt':
- if self._flags['become_prompt']:
- display.debug('Sending become_pass in response to prompt')
- stdin.write(to_bytes(self._play_context.become_pass) + b'\n')
- self._flags['become_prompt'] = False
- state += 1
- elif self._flags['become_success']:
- state += 1
-
- # We've requested escalation (with or without a password), now we
- # wait for an error message or a successful escalation.
-
- if states[state] == 'awaiting_escalation':
- if self._flags['become_success']:
- display.debug('Escalation succeeded')
- self._flags['become_success'] = False
- state += 1
- elif self._flags['become_error']:
- display.debug('Escalation failed')
- self._terminate_process(p)
- self._flags['become_error'] = False
- raise AnsibleError('Incorrect %s password' % self._play_context.become_method)
- elif self._flags['become_nopasswd_error']:
- display.debug('Escalation requires password')
- self._terminate_process(p)
- self._flags['become_nopasswd_error'] = False
- raise AnsibleError('Missing %s password' % self._play_context.become_method)
- elif self._flags['become_prompt']:
- # This shouldn't happen, because we should see the "Sorry,
- # try again" message first.
- display.debug('Escalation prompt repeated')
- self._terminate_process(p)
- self._flags['become_prompt'] = False
- raise AnsibleError('Incorrect %s password' % self._play_context.become_method)
-
- # Once we're sure that the privilege escalation prompt, if any, has
- # been dealt with, we can send any initial data and start waiting
- # for output.
-
- if states[state] == 'ready_to_send':
- if in_data:
- self._send_initial_data(stdin, in_data)
- state += 1
-
- # Now we're awaiting_exit: has the child process exited? If it has,
- # and we've read all available output from it, we're done.
-
- if poll is not None:
- if not selector.get_map() or not events:
- break
- # We should not see further writes to the stdout/stderr file
- # descriptors after the process has closed, set the select
- # timeout to gather any last writes we may have missed.
- timeout = 0
- continue
-
- # If the process has not yet exited, but we've already read EOF from
- # its stdout and stderr (and thus no longer watching any file
- # descriptors), we can just wait for it to exit.
-
- elif not selector.get_map():
- p.wait()
- break
-
- # Otherwise there may still be outstanding data to read.
- finally:
- selector.close()
- # close stdin after process is terminated and stdout/stderr are read
- # completely (see also issue #848)
- stdin.close()
-
- if C.HOST_KEY_CHECKING:
- if cmd[0] == b"sshpass" and p.returncode == 6:
- raise AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support '
- 'this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')
-
- controlpersisterror = b'Bad configuration option: ControlPersist' in b_stderr or b'unknown configuration option: ControlPersist' in b_stderr
- if p.returncode != 0 and controlpersisterror:
- raise AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" '
- '(or ssh_args in [ssh_connection] section of the config file) before running again')
-
- # If we find a broken pipe because of ControlPersist timeout expiring (see #16731),
- # we raise a special exception so that we can retry a connection.
- controlpersist_broken_pipe = b'mux_client_hello_exchange: write packet: Broken pipe' in b_stderr
- if p.returncode == 255 and controlpersist_broken_pipe:
- raise AnsibleControlPersistBrokenPipeError('SSH Error: data could not be sent because of ControlPersist broken pipe.')
-
- if p.returncode == 255 and in_data and checkrc:
- raise AnsibleConnectionFailure('SSH Error: data could not be sent to remote host "%s". Make sure this host can be reached over ssh' % self.host)
-
- return (p.returncode, b_stdout, b_stderr)
-
-
- if LooseVersion(ansible_version) >= LooseVersion('2.3.0.0'):
- @_ssh_retry
- def _run(self, cmd, in_data, sudoable=True, checkrc=True):
- """Wrapper around _bare_run that retries the connection
- """
- return self._bare_run(cmd, in_data, sudoable, checkrc)
-
-
- if LooseVersion(ansible_version) < LooseVersion('2.3.0.0'):
- def _run(self, cmd, in_data, sudoable=True):
- '''
- Starts the command and communicates with it until it ends.
- '''
-
- display_cmd = map(to_unicode, map(pipes.quote, cmd))
- display.vvv(u'SSH: EXEC {0}'.format(u' '.join(display_cmd)), host=self.host)
-
- # Start the given command. If we don't need to pipeline data, we can try
- # to use a pseudo-tty (ssh will have been invoked with -tt). If we are
- # pipelining data, or can't create a pty, we fall back to using plain
- # old pipes.
-
- p = None
-
- if isinstance(cmd, (text_type, binary_type)):
- cmd = to_bytes(cmd)
- else:
- cmd = map(to_bytes, cmd)
-
- if not in_data:
- try:
- # Make sure stdin is a proper pty to avoid tcgetattr errors
- master, slave = pty.openpty()
- p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdin = os.fdopen(master, 'w', 0)
- os.close(slave)
- except (OSError, IOError):
- p = None
-
- if not p:
- p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdin = p.stdin
-
- # If we are using SSH password authentication, write the password into
- # the pipe we opened in _build_command.
-
- if self._play_context.password:
- os.close(self.sshpass_pipe[0])
- os.write(self.sshpass_pipe[1], "{0}\n".format(to_bytes(self._play_context.password)))
- os.close(self.sshpass_pipe[1])
-
- ## SSH state machine
- #
- # Now we read and accumulate output from the running process until it
- # exits. Depending on the circumstances, we may also need to write an
- # escalation password and/or pipelined input to the process.
-
- states = [
- 'awaiting_prompt', 'awaiting_escalation', 'ready_to_send', 'awaiting_exit'
- ]
-
- # Are we requesting privilege escalation? Right now, we may be invoked
- # to execute sftp/scp with sudoable=True, but we can request escalation
- # only when using ssh. Otherwise we can send initial data straightaway.
-
- state = states.index('ready_to_send')
- if b'ssh' in cmd:
- if self._play_context.prompt:
- # We're requesting escalation with a password, so we have to
- # wait for a password prompt.
- state = states.index('awaiting_prompt')
- display.debug('Initial state: %s: %s' % (states[state], self._play_context.prompt))
- elif self._play_context.become and self._play_context.success_key:
- # We're requesting escalation without a password, so we have to
- # detect success/failure before sending any initial data.
- state = states.index('awaiting_escalation')
- display.debug('Initial state: %s: %s' % (states[state], self._play_context.success_key))
-
- # We store accumulated stdout and stderr output from the process here,
- # but strip any privilege escalation prompt/confirmation lines first.
- # Output is accumulated into tmp_*, complete lines are extracted into
- # an array, then checked and removed or copied to stdout or stderr. We
- # set any flags based on examining the output in self._flags.
-
- stdout = stderr = ''
- tmp_stdout = tmp_stderr = ''
-
- self._flags = dict(
- become_prompt=False, become_success=False,
- become_error=False, become_nopasswd_error=False
- )
-
- # select timeout should be longer than the connect timeout, otherwise
- # they will race each other when we can't connect, and the connect
- # timeout usually fails
- timeout = 2 + self._play_context.timeout
- rpipes = [p.stdout, p.stderr]
- for fd in rpipes:
- fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
-
- # If we can send initial data without waiting for anything, we do so
- # before we call select.
-
- if states[state] == 'ready_to_send' and in_data:
- self._send_initial_data(stdin, in_data)
- state += 1
-
- while True:
- rfd, wfd, efd = select.select(rpipes, [], [], timeout)
-
- # We pay attention to timeouts only while negotiating a prompt.
-
- if not rfd:
- if state <= states.index('awaiting_escalation'):
- # If the process has already exited, then it's not really a
- # timeout; we'll let the normal error handling deal with it.
- if p.poll() is not None:
- break
- self._terminate_process(p)
- raise AnsibleError('Timeout (%ds) waiting for privilege escalation prompt: %s' % (timeout, stdout))
-
- # Read whatever output is available on stdout and stderr, and stop
- # listening to the pipe if it's been closed.
-
- if p.stdout in rfd:
- chunk = p.stdout.read()
- if chunk == '':
- rpipes.remove(p.stdout)
- tmp_stdout += chunk
- display.debug("stdout chunk (state=%s):\n>>>%s<<<\n" % (state, chunk))
-
- if p.stderr in rfd:
- chunk = p.stderr.read()
- if chunk == '':
- rpipes.remove(p.stderr)
- tmp_stderr += chunk
- display.debug("stderr chunk (state=%s):\n>>>%s<<<\n" % (state, chunk))
-
- # We examine the output line-by-line until we have negotiated any
- # privilege escalation prompt and subsequent success/error message.
- # Afterwards, we can accumulate output without looking at it.
-
- if state < states.index('ready_to_send'):
- if tmp_stdout:
- output, unprocessed = self._examine_output('stdout', states[state], tmp_stdout, sudoable)
- stdout += output
- tmp_stdout = unprocessed
-
- if tmp_stderr:
- output, unprocessed = self._examine_output('stderr', states[state], tmp_stderr, sudoable)
- stderr += output
- tmp_stderr = unprocessed
- else:
- stdout += tmp_stdout
- stderr += tmp_stderr
- tmp_stdout = tmp_stderr = ''
-
- # If we see a privilege escalation prompt, we send the password.
- # (If we're expecting a prompt but the escalation succeeds, we
- # didn't need the password and can carry on regardless.)
-
- if states[state] == 'awaiting_prompt':
- if self._flags['become_prompt']:
- display.debug('Sending become_pass in response to prompt')
- stdin.write('{0}\n'.format(to_bytes(self._play_context.become_pass )))
- self._flags['become_prompt'] = False
- state += 1
- elif self._flags['become_success']:
- state += 1
-
- # We've requested escalation (with or without a password), now we
- # wait for an error message or a successful escalation.
-
- if states[state] == 'awaiting_escalation':
- if self._flags['become_success']:
- display.debug('Escalation succeeded')
- self._flags['become_success'] = False
- state += 1
- elif self._flags['become_error']:
- display.debug('Escalation failed')
- self._terminate_process(p)
- self._flags['become_error'] = False
- raise AnsibleError('Incorrect %s password' % self._play_context.become_method)
- elif self._flags['become_nopasswd_error']:
- display.debug('Escalation requires password')
- self._terminate_process(p)
- self._flags['become_nopasswd_error'] = False
- raise AnsibleError('Missing %s password' % self._play_context.become_method)
- elif self._flags['become_prompt']:
- # This shouldn't happen, because we should see the "Sorry,
- # try again" message first.
- display.debug('Escalation prompt repeated')
- self._terminate_process(p)
- self._flags['become_prompt'] = False
- raise AnsibleError('Incorrect %s password' % self._play_context.become_method)
-
- # Once we're sure that the privilege escalation prompt, if any, has
- # been dealt with, we can send any initial data and start waiting
- # for output.
-
- if states[state] == 'ready_to_send':
- if in_data:
- self._send_initial_data(stdin, in_data)
- state += 1
-
- # Now we're awaiting_exit: has the child process exited? If it has,
- # and we've read all available output from it, we're done.
-
- if p.poll() is not None:
- if not rpipes or not rfd:
- break
-
- # When ssh has ControlMaster (+ControlPath/Persist) enabled, the
- # first connection goes into the background and we never see EOF
- # on stderr. If we see EOF on stdout and the process has exited,
- # we're probably done. We call select again with a zero timeout,
- # just to make certain we don't miss anything that may have been
- # written to stderr between the time we called select() and when
- # we learned that the process had finished.
-
- if p.stdout not in rpipes:
- timeout = 0
- continue
-
- # If the process has not yet exited, but we've already read EOF from
- # its stdout and stderr (and thus removed both from rpipes), we can
- # just wait for it to exit.
-
- elif not rpipes:
- p.wait()
- break
-
- # Otherwise there may still be outstanding data to read.
-
- # close stdin after process is terminated and stdout/stderr are read
- # completely (see also issue #848)
- stdin.close()
-
- if C.HOST_KEY_CHECKING:
- if cmd[0] == b"sshpass" and p.returncode == 6:
- raise AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')
-
- controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or 'unknown configuration option: ControlPersist' in stderr
- if p.returncode != 0 and controlpersisterror:
- raise AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" (or ssh_args in [ssh_connection] section of the config file) before running again')
-
- if p.returncode == 255 and in_data:
- raise AnsibleConnectionFailure('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
-
- return (p.returncode, stdout, stderr)
-
-
- def _exec_command(self, cmd, in_data=None, sudoable=True):
- ''' run a command on the remote host '''
-
- super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
-
- display.vvv(u"ESTABLISH SSH CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self._play_context.remote_addr)
-
- # we can only use tty when we are not pipelining the modules. piping
- # data into /usr/bin/python inside a tty automatically invokes the
- # python interactive-mode but the modules are not compatible with the
- # interactive-mode ("unexpected indent" mainly because of empty lines)
-
- if LooseVersion(ansible_version) >= LooseVersion('2.3.0.0'):
- ssh_executable = self._play_context.ssh_executable
- if in_data:
- if LooseVersion(ansible_version) < LooseVersion('2.3.0.0'):
- cmd = self._build_command('ssh', self.host, cmd)
- if LooseVersion(ansible_version) >= LooseVersion('2.3.0.0'):
- cmd = self._build_command(ssh_executable, self.host, cmd)
- else:
- if LooseVersion(ansible_version) < LooseVersion('2.3.0.0'):
- cmd = self._build_command('ssh', '-tt', self.host, cmd)
- if LooseVersion(ansible_version) >= LooseVersion('2.3.0.0'):
- cmd = self._build_command(ssh_executable, '-tt', self.host, cmd)
-
- (returncode, stdout, stderr) = self._run(cmd, in_data, sudoable=sudoable)
-
- return (returncode, stdout, stderr)
-
-
- def dir_print(self,obj):
- for attr_name in dir(obj):
- try:
- attr_value = getattr(obj, attr_name)
- print(attr_name, attr_value, callable(attr_value))
- except:
- pass
-
-
- #
- # Main public methods
- #
- def exec_command(self, cmd, in_data=None, sudoable=False):
- ''' run a command on the chroot '''
- display.vvv('XXX exec_command: %s' % cmd)
- super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
-
- if LooseVersion(ansible_version) >= LooseVersion('2.3.0.0'):
- ssh_executable = self._play_context.ssh_executable
- ##print dir(self)
- ##print dir(self._play_context)
- ##print self._play_context._attributes
- #self.dir_print(self._play_context)
- #vm = self._play_context.get_ds()
- #print( vm )
- #raise "blah"
- h = self.container_name
- if (self.lxc_version == 2):
- lxc_cmd = 'sudo -i lxc exec %s --mode=non-interactive -- /bin/sh -c %s' \
- % (pipes.quote(h),
- pipes.quote(cmd))
- elif (self.lxc_version == 1):
- lxc_cmd = 'sudo -i lxc-attach --name %s -- /bin/sh -c %s' \
- % (pipes.quote(h),
- pipes.quote(cmd))
- if in_data:
- if LooseVersion(ansible_version) < LooseVersion('2.3.0.0'):
- cmd = self._build_command('ssh', self.host, lxc_cmd)
- if LooseVersion(ansible_version) >= LooseVersion('2.3.0.0'):
- cmd = self._build_command(ssh_executable, self.host, lxc_cmd)
- else:
- if LooseVersion(ansible_version) < LooseVersion('2.3.0.0'):
- cmd = self._build_command('ssh', '-tt', self.host, lxc_cmd)
- if LooseVersion(ansible_version) >= LooseVersion('2.3.0.0'):
- cmd = self._build_command(ssh_executable, '-tt', self.host, lxc_cmd)
- #self.ssh.exec_command(lxc_cmd,in_data,sudoable)
- (returncode, stdout, stderr) = self._run(cmd, in_data, sudoable=sudoable)
- return (returncode, stdout, stderr)
-
-
- def put_file(self, in_path, out_path):
- ''' transfer a file from local to lxc '''
- super(Connection, self).put_file(in_path, out_path)
- if LooseVersion(ansible_version) < LooseVersion('2.3.0.0'):
- display.vvv('XXX put_file %s %s' % (in_path,out_path))
- if LooseVersion(ansible_version) >= LooseVersion('2.3.0.0'):
- display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self.host)
- ssh_executable = self._play_context.ssh_executable
-
- if LooseVersion(ansible_version) < LooseVersion('2.3.0.0'):
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
- if LooseVersion(ansible_version) >= LooseVersion('2.3.0.0'):
- if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
- raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_native(in_path)))
-
- with open(in_path,'r') as in_f:
- in_data = in_f.read()
- cmd = ('cat > %s; echo -n done' % pipes.quote(out_path))
- h = self.container_name
- if (self.lxc_version == 2):
- lxc_cmd = 'sudo lxc exec %s --mode=non-interactive -- /bin/sh -c %s' \
- % (pipes.quote(h),
- pipes.quote(cmd))
- elif (self.lxc_version == 1):
- lxc_cmd = 'sudo lxc-attach --name %s -- /bin/sh -c %s' \
- % (pipes.quote(h),
- pipes.quote(cmd))
- if in_data:
- if LooseVersion(ansible_version) < LooseVersion('2.3.0.0'):
- cmd = self._build_command('ssh', self.host, lxc_cmd)
- if LooseVersion(ansible_version) >= LooseVersion('2.3.0.0'):
- cmd = self._build_command(ssh_executable, self.host, lxc_cmd)
- else:
- if LooseVersion(ansible_version) < LooseVersion('2.3.0.0'):
- cmd = self._build_command('ssh', '-tt', self.host, lxc_cmd)
- if LooseVersion(ansible_version) >= LooseVersion('2.3.0.0'):
- cmd = self._build_command(ssh_executable, '-tt', self.host, lxc_cmd)
- #self.ssh.exec_command(lxc_cmd,in_data,sudoable)
- (returncode, stdout, stderr) = self._run(cmd, in_data, sudoable=False)
- return (returncode, stdout, stderr)
-
-
- def fetch_file(self, in_path, out_path):
- ''' fetch a file from lxc to local '''
- super(Connection, self).fetch_file(in_path, out_path)
- if LooseVersion(ansible_version) < LooseVersion('2.3.0.0'):
- display.vvv('XXX fetch_file %s %s' % (in_path,out_path))
- if LooseVersion(ansible_version) >= LooseVersion('2.3.0.0'):
- display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self.host)
- ssh_executable = self._play_context.ssh_executable
-
- if LooseVersion(ansible_version) < LooseVersion('2.3.0.0'):
- cmd = ('cat %s' % pipes.quote(in_path))
- if LooseVersion(ansible_version) >= LooseVersion('2.3.0.0'):
- cmd = ('cat < %s' % pipes.quote(in_path))
- h = self.container_name
- if (self.lxc_version == 2):
- lxc_cmd = 'sudo lxc exec %s --mode=non-interactive -- /bin/sh -c %s' \
- % (pipes.quote(h),
- pipes.quote(cmd))
- elif (self.lxc_version == 1):
- lxc_cmd = 'sudo lxc-attach --name %s -- /bin/sh -c %s' \
- % (pipes.quote(h),
- pipes.quote(cmd))
-
- if LooseVersion(ansible_version) < LooseVersion('2.3.0.0'):
- in_data = None
- if in_data:
- cmd = self._build_command('ssh', self.host, lxc_cmd)
- else:
- cmd = self._build_command('ssh', '-tt', self.host, lxc_cmd)
- (returncode, stdout, stderr) = self._run(cmd, in_data, sudoable=False)
- if returncode != 0:
- raise AnsibleError("failed to transfer file from {0}:\n{1}\n{2}".format(in_path, stdout, stderr))
- with open(out_path,'w') as out_f:
- out_f.write(stdout)
-
- if LooseVersion(ansible_version) >= LooseVersion('2.3.0.0'):
- cmd = self._build_command(ssh_executable, self.host, lxc_cmd)
- (returncode, stdout, stderr) = self._run(cmd, None, sudoable=False)
-
- if returncode != 0:
- raise AnsibleError("failed to transfer file from {0}:\n{1}\n{2}".format(in_path, stdout, stderr))
- with open(out_path,'w') as out_f:
- out_f.write(stdout)
-
- return (returncode, stdout, stderr)
-
-
- # only used from Ansible version 2.3 on forward
- def reset(self):
- # If we have a persistent ssh connection (ControlPersist), we can ask it to stop listening.
- cmd = self._build_command(self._play_context.ssh_executable, '-O', 'stop', self.host)
- controlpersist, controlpath = self._persistence_controls(cmd)
- if controlpersist:
- display.vvv(u'sending stop: %s' % cmd)
- p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdout, stderr = p.communicate()
- status_code = p.wait()
- if status_code != 0:
- raise AnsibleError("Cannot reset connection:\n%s" % stderr)
- self.close()
-
-
- def close(self):
- ''' terminate the connection; nothing to do here '''
- display.vvv('XXX close')
- super(Connection, self).close()
- #self.ssh.close()
- self._connected = False
diff --git a/ansible/group_vars/all/bird.yml b/ansible/group_vars/all/bird.yml
new file mode 100644
index 0000000..dce5afa
--- /dev/null
+++ b/ansible/group_vars/all/bird.yml
@@ -0,0 +1,3 @@
+# which version of bird is used, 0-padded with 3 digits
+# 2.15 => 2015
+birdv: 0
diff --git a/ansible/group_vars/all/ipam.yml b/ansible/group_vars/all/ipam.yml
new file mode 100644
index 0000000..5621385
--- /dev/null
+++ b/ansible/group_vars/all/ipam.yml
@@ -0,0 +1,105 @@
+# Netmasks for prefixes:
+# 48: ffff:ffff:ffff:0000::
+# 52: ffff:ffff:ffff:f000::
+# 56: ffff:ffff:ffff:ff00::
+# 60: ffff:ffff:ffff:fff0::
+# 64: ffff:ffff:ffff:ffff::
+# 80: ffff:ffff:ffff:ffff:ffff:
+ipam6:
+ networks:
+ bitraf_dn42:
+ range: "fdb1:4242:3538::/48"
+ tnet_dn42:
+ range: "fdb1:4242:3538:2000::/52"
+ unused_2001:
+ range: "fdb1:4242:3538:2001::/64"
+ node1_dn42:
+ range: "fdb1:4242:3538:2002::/64"
+ hosts:
+ node1: "fdb1:4242:3538:2002::ffff"
+ node2_dn42:
+ range: "fdb1:4242:3538:2003::/64"
+ hosts:
+ node2: "fdb1:4242:3538:2003::ffff"
+ knot_dn42:
+ range: "fdb1:4242:3538:2004::/64"
+ hosts:
+ knot: "fdb1:4242:3538:2004::ffff"
+ coregonus_dn42:
+ range: "fdb1:4242:3538:2005::/64"
+ hosts:
+ kjell: "fdb1:4242:3538:2006:2f0:cbff:fefe:d98c/64"
+ coregonus_docker:
+ range: "fdb1:4242:3538:2005:df01:676a:ec28:0a00/120"
+ kv24_pub:
+ range: "2a01:799:111d:1801::/64"
+ kv24_dn42:
+ range: "fdb1:4242:3538:2006::/64"
+ hosts:
+ kv24ix: "fdb1:4242:3538:2006::ffff"
+ garasjepi: "fdb1:4242:3538:2006:e9f7:418f:49fd:8d2e"
+ ha-kv24: "fdb1:4242:3538:2006:18a:90f:4264:8ca0/64"
+ netbox: "fdb1:4242:3538:2006:be24:11ff:febb:5c7f/64"
+ coregonus: "fdb1:4242:3538:2006:2e0:4cff:fe98:1bb5/64"
+ hash_dn42:
+ range: "fdb1:4242:3538:2007::/64"
+ hosts:
+ hash: "fdb1:4242:3538:2007::ffff"
+ hash_docker_dn42:
+ range: "fdb1:4242:3538:2007:1001::/112"
+ lhn2_dn42:
+ range: "fdb1:4242:3538:2008::/64"
+ hosts:
+ lhn2pi: "fdb1:4242:3538:2008::ffff"
+ lhn2-z2m: "fdb1:4242:3538:2008:9aed:e460:1711:07dd"
+ ha-lhn2: "fdb1:4242:3538:2008:9c59:926f:1dc9:89db"
+ conflatorio: "fdb1:4242:3538:2008:8042:32ff:fe0c:7161"
+ danneri: "fdb1:4242:3538:2008:9422:d355:95b7:f170"
+ unifi: "fdb1:4242:3538:2008:5054:ff:fe4d:96c"
+
+# k8s:
+# range: "fdb1:4242:3538:3000::/52"
+ danneri_cluster:
+ range: "fdb1:4242:3538:2008:aaaa:aaaa:aaaa::/112"
+ danneri_service:
+ range: "fdb1:4242:3538:2008:bbbb:bbbb:bbbb::/112"
+# danneri_service2:
+# range: "fdb1:4242:3538:300a::/112"
+
+ dn42:
+ range: "fd00::/8"
+
+routers:
+ knot:
+ as: 4242423538
+ peers:
+ hash:
+ routedbits_lon1:
+ as: 4242420207
+ hash:
+ as: 4242423538
+ peers:
+ knot:
+ lhn2:
+ as: 4242423538
+ peers:
+ knot:
+ node1:
+ as: 4242423538
+ peers:
+ knot:
+ hash:
+ node2:
+ as: 4242423538
+ peers:
+ knot:
+ hash:
+ kv24:
+ as: 4242423538
+ peers:
+ knot:
+ danneri:
+ as: 4242423538
+ peers:
+ knot:
+ hash:
diff --git a/ansible/host_vars/danneri/systemd-networkd.yml b/ansible/host_vars/danneri/systemd-networkd.yml
new file mode 100644
index 0000000..023e276
--- /dev/null
+++ b/ansible/host_vars/danneri/systemd-networkd.yml
@@ -0,0 +1,2 @@
+systemd_networkd__files:
+ - danneri/systemd-networkd/enp1s0.network
diff --git a/ansible/host_vars/hash/roa-server.vault.yml b/ansible/host_vars/hash/roa-server.vault.yml
new file mode 100644
index 0000000..a750fb2
--- /dev/null
+++ b/ansible/host_vars/hash/roa-server.vault.yml
@@ -0,0 +1,19 @@
+$ANSIBLE_VAULT;1.1;AES256
+38363463316565643131623966623232623833613832383566353166636462613237396635396239
+3832343533663432353731353231313732386662333035330a363464616131316264613331383333
+31353331336166313361623833343135653761653133623931396464383436633132393963303462
+3630653434643266610a613130653961636362313065353833613036623239333635643164333266
+64373064363563666435383062626139356630643163386134366133333933383939343265646365
+33323165353331656232303133613263346530376333336565393235393564373562613732323766
+32613534306565386135303263383561316230303434656664323635666463663062313661343338
+39313535393964383232643337666364343763623964303130343631393964633330303038666364
+64346362343066643566333030313232396334643139613066336332633466663466663530346339
+39613430303461326431663832386537643061313961663332356661663535306266323064313634
+62393663373364336239626233396336636232376532343732616432343031653361383734333235
+31343032396532313531396135376263373163396634626166363366663365653562613130313839
+65656136633965643035353234333037663363616366323830333265616236613761323836303461
+39656237343561646166616265383630366432333631303938393938346232613039373735356333
+36626537353564353662616566643635336464336432636464616663336661373965323035326232
+34373831613465313161343132383036666338303166626639646539303438376335323261356532
+34346535656462646562333332393561656262656631303465346330643934343039663762396563
+3437326539616661643163396461663930376232396136333634
diff --git a/ansible/host_vars/kjell-ct-102/traefik-server.yml b/ansible/host_vars/kjell-ct-102/traefik-server.yml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/ansible/host_vars/kjell-ct-102/traefik-server.yml
diff --git a/ansible/host_vars/kjell-ct-102/users.yml b/ansible/host_vars/kjell-ct-102/users.yml
new file mode 100644
index 0000000..d0d4852
--- /dev/null
+++ b/ansible/host_vars/kjell-ct-102/users.yml
@@ -0,0 +1,5 @@
+lusers:
+ - trygvis
+
+superusers:
+ - trygvis
diff --git a/ansible/host_vars/lhn2pi/all.yml b/ansible/host_vars/lhn2pi/all.yml
new file mode 100644
index 0000000..447906a
--- /dev/null
+++ b/ansible/host_vars/lhn2pi/all.yml
@@ -0,0 +1,2 @@
+systemd_networkd__files:
+ - lhn2pi/systemd/network/10-eth0.network
diff --git a/ansible/host_vars/unifi/systemd-networkd.yml b/ansible/host_vars/unifi/systemd-networkd.yml
new file mode 100644
index 0000000..4ee9ee6
--- /dev/null
+++ b/ansible/host_vars/unifi/systemd-networkd.yml
@@ -0,0 +1,2 @@
+systemd_networkd__files:
+ - unifi/systemd-networkd/enp1s0.network
diff --git a/ansible/inventory b/ansible/inventory
index 17f3ec7..912b922 100644
--- a/ansible/inventory
+++ b/ansible/inventory
@@ -29,24 +29,40 @@ all:
ansible_python_interpreter: /usr/bin/python3
nextcloud:
ansible_host: 192.168.10.201
- unifi:
- ansible_host: 192.168.10.202
babypi:
ansible_host: 192.168.10.159
- astyanax:
- ansible_host: astyanax.vpn.trygvis.io
+ # astyanax:
+ # ansible_host: astyanax.vpn.trygvis.io
sweetzpot-mobile:
ansible_host: 192.168.10.123
sweetzpot-macos:
biwia:
ansible_host: biwia.vpn.trygvis.io
lhn2pi:
+ ansible_host: lhn2pi.vpn.trygvis.io
lhn2ix:
kv24ix:
+ coregonus:
+ ansible_host: 192.168.10.190
+ danneri:
+ ansible_host: danneri.dn42.trygvis.io
+ unifi:
+ ansible_host: unifi.dn42.trygvis.io
+ garasjepi:
+ ansible_host: garasjepi.dn42.trygvis.io
+
+ node1:
+ ansible_host: 9859f51e-1e3e-4c05-a826-b7fbe18d91be.pub.instances.scw.cloud
+ node2:
+ ansible_host: 927624a8-7824-444d-903d-8507eb1e0669.pub.instances.scw.cloud
zh2569.rsync.net:
ansible_user: zh2569
+ # Kjell
+ kjell-ct-102:
+ ansible_host: fdb1:4242:3538:2005:be24:11ff:fe34:b52c
+
children:
workstation:
children:
@@ -75,7 +91,6 @@ all:
malabaricus:
nextcloud:
numquam:
- unifi:
lxc_hosts:
hosts:
arius:
@@ -92,7 +107,6 @@ all:
debian_stretch:
hosts:
malabaricus:
- unifi:
vars:
packages__version: stretch
@@ -142,10 +156,12 @@ all:
android-trygvis:
arius:
astyanax:
+ state: absent
babypi:
birgitte:
biwia:
conflatorio:
+ coregonus:
hash:
knot:
kv24ix:
@@ -163,4 +179,35 @@ all:
hash:
knot:
+ tnet:
+ hosts:
+ akili:
+ astyanax:
+ conflatorio:
+ coregonus:
+ hash:
+ knot:
+ kv24ix:
+ lhn2ix:
+ lhn2pi:
+ node1:
+ node2:
+
+ tnet_bird:
+ hosts:
+ akili:
+ astyanax:
+ conflatorio:
+ coregonus:
+ hash:
+ knot:
+ lhn2pi:
+ node1:
+ node2:
+
+ zigbee2mqtt:
+ hosts:
+ garasjepi:
+ lhn2pi:
+
# vim: set filetype=yaml:
diff --git a/ansible/netbox/pyproject.toml b/ansible/netbox/pyproject.toml
new file mode 100644
index 0000000..84c0d6d
--- /dev/null
+++ b/ansible/netbox/pyproject.toml
@@ -0,0 +1,10 @@
+[project]
+name = "netbox"
+version = "0.1.0"
+description = "Add your description here"
+readme = "README.md"
+requires-python = ">=3.12"
+dependencies = [
+ "unifi-controller-api",
+ "pynetbox",
+]
diff --git a/ansible/netbox/sync-unifi.py b/ansible/netbox/sync-unifi.py
new file mode 100644
index 0000000..4427b20
--- /dev/null
+++ b/ansible/netbox/sync-unifi.py
@@ -0,0 +1,363 @@
+import os
+import sys
+from unifi_controller_api import UnifiController, UnifiDevice
+from unifi_controller_api.exceptions import UnifiAuthenticationError, UnifiAPIError
+from pprint import pprint
+
+import pynetbox
+from pynetbox.core.response import Record
+
class Db():
    """In-memory staging area for NetBox objects gathered before syncing."""

    def __init__(self):
        # One pending-object list per NetBox resource kind.
        for attr in ("devices", "interfaces", "ips", "macs", "cables"):
            setattr(self, attr, [])
+
class NotFoundException(Exception):
    """Raised when a NetBox lookup yields no matching resource."""

    def __init__(self, msg):
        # Delegate message handling entirely to Exception.
        super().__init__(msg)
+
class Query():
    """A deferred NetBox lookup.

    Wraps a zero-argument callable so lookups can be staged first and
    executed later (during sync). ``projection`` extracts the wanted value
    from the matched record — its ``id`` by default.
    """

    def __init__(self, args, query, projection=lambda x: x.id):
        # args: the lookup arguments, kept only for error reporting.
        self.args = args
        self.query = query
        self.projection = projection

    def run(self, nb):
        """Execute the stored query; raise NotFoundException on no match."""
        try:
            ret = self.query()
        except Exception as e:
            print("Query failed: ")
            # BUG FIX: was `value.args` — `value` is undefined here, so any
            # query failure was masked by a NameError.
            print(f"Arguments: {self.args}")
            print(e)
            raise e

        if ret is None:
            raise NotFoundException(f"resource not found, args={self.args}")

        # NOTE(review): no multiplicity check — presumably pynetbox .get()
        # already raises on multiple matches; confirm.

        return self.projection(ret)
+
def find_device(nb, name: str):
    """Deferred lookup of a device by name; resolves to its id."""
    lookup = lambda: nb.dcim.devices.get(name=name)
    return Query({}, lookup)
+
def find_interface_by_mac(nb, mac_address: str):
    """Deferred lookup of an interface by its MAC address; resolves to its id."""
    # Record only the lookup arguments for error reporting.
    described_args = {"mac_address": mac_address}
    return Query(described_args, lambda: nb.dcim.interfaces.get(mac_address=mac_address))
+
def find_interface(nb, device: str, name: str):
    """Deferred lookup of an interface by device and name; resolves to its id."""
    # Record only the lookup arguments for error reporting.
    described_args = {"device": device, "name": name}
    return Query(described_args, lambda: nb.dcim.interfaces.get(device=device, name=name))
+
class NetboxCache():
    """Memoized lookups against a pynetbox API handle.

    Device roles and types are cached by slug; IP addresses by
    (address, vrf id), since the same address may exist once per VRF.
    """

    def __init__(self, nb):
        self.nb = nb
        self.device_roles = {}
        self.device_types = {}
        self.ip_addresses = {}

    def get_device_role(self, slug):
        """Return the device role for `slug`, fetching and caching on miss."""
        dt = self.device_roles.get(slug)

        if dt is not None:
            return dt

        dt = self.nb.dcim.device_roles.get(slug=slug)
        if dt is None:
            # BUG FIX: message previously said "device type" (copy-paste).
            raise Exception(f"No such device role: {slug}")

        self.device_roles[slug] = dt
        return dt

    def get_device_type(self, slug):
        """Return the device type for `slug`, fetching and caching on miss."""
        dt = self.device_types.get(slug)

        if dt is not None:
            return dt

        dt = self.nb.dcim.device_types.get(slug=slug)
        if dt is None:
            raise Exception(f"No such device type: {slug}")

        self.device_types[slug] = dt
        return dt

    def get_or_create_ip_address(self, addr: str, vrf: "Record | None", data):
        """Return the IP record for (addr, vrf), creating it when absent.

        NOTE(review): `data` is applied only when an existing IP is found;
        newly created IPs get just address/vrf/status="active" — confirm
        that this asymmetry is intended.
        """
        vrf_id = vrf.id if vrf is not None else None
        key = (addr, vrf_id)
        ip = self.ip_addresses.get(key)
        if ip is not None:
            return ip

        ip = self.nb.ipam.ip_addresses.get(address=addr, vrf_id=vrf_id)
        if ip is not None:
            print(f"Found IP address {ip.id} address={ip.address}, vrf={ip.vrf}")
            ip.update(data)
            # Re-fetch so the cache holds the post-update record.
            ip = self.nb.ipam.ip_addresses.get(address=addr, vrf_id=vrf_id)
            self.ip_addresses[key] = ip
            return ip

        ip = self.nb.ipam.ip_addresses.create(address=addr, vrf=vrf_id, status="active")
        self.ip_addresses[key] = ip
        return ip
+
def create_or_update_device(nb, d):
    """Upsert a device identified by name; returns the resulting record."""
    existing = nb.dcim.devices.get(name=d["name"])

    if existing is not None:
        print(f"Updating device id={existing.id}, name={existing.name}")
        existing.update(d)
        # Re-read so the caller sees the post-update state.
        return nb.dcim.devices.get(id=existing.id)

    created = nb.dcim.devices.create(d)
    print(f"Created device id={created.id}, name={created.name}")
    return created
+
def create_or_update_interface(nb, i):
    """Upsert an interface identified by (device, name)."""
    existing = nb.dcim.interfaces.get(device_id=i["device"], name=i["name"])

    if existing is not None:
        print(f"Updating interface id={existing.id}, name={existing.name}")
        existing.update(i)
        # Re-read so the caller sees the post-update state.
        return nb.dcim.interfaces.get(id=existing.id)

    created = nb.dcim.interfaces.create(i)
    print(f"Created interface id={created.id}, name={created.name}")
    return created
+
def create_or_update_mac_address(nb, data):
    """Upsert a MAC address record keyed by the MAC itself."""
    existing = nb.dcim.mac_addresses.get(mac_address=data["mac_address"])

    if existing is not None:
        print(f"Updating MAC address id={existing.id}, address={existing.mac_address}")
        existing.update(data)
        # Re-read so the caller sees the post-update state.
        return nb.dcim.mac_addresses.get(id=existing.id)

    created = nb.dcim.mac_addresses.create(data)
    print(f"Created MAC address id={created.id}, address={created.mac_address}")
    return created
+
def create_or_update_ip_address(nb, data):
    """Upsert an IP address record keyed by the address string."""
    existing = nb.ipam.ip_addresses.get(address=data["address"])

    if existing is not None:
        print(f"Updating IP address id={existing.id}, ip={existing.address}")
        existing.update(data)
        # Re-read so the caller sees the post-update state.
        return nb.ipam.ip_addresses.get(id=existing.id)

    created = nb.ipam.ip_addresses.create(data)
    print(f"Created IP address id={created.id}, ip={created.address}")
    return created
+
def create_or_update_cable(nb, data):
    """Upsert a cable between two single-termination endpoints.

    Raises Exception when either side has other than exactly one
    termination; the NetBox lookup matches on both endpoint ids.
    """
    if len(data["a_terminations"]) == 1:
        a = data["a_terminations"][0]
    else:
        raise Exception("only single termination is supported")

    if len(data["b_terminations"]) == 1:
        b = data["b_terminations"][0]
    else:
        raise Exception("only single termination is supported")

    cable = nb.dcim.cables.get(
        termination_a_type=a["object_type"],
        termination_a_id=a["object_id"],
        termination_b_type=b["object_type"],
        termination_b_id=b["object_id"],
    )
    if cable is None:
        cable = nb.dcim.cables.create(data)
        # BUG FIX: previously printed `ip.id` — `ip` is undefined here, so
        # every cable creation raised a NameError.
        print(f"Created Cable address id={cable.id}")

        return cable

    # BUG FIX: same undefined `ip.id` on the update path.
    print(f"Updating cable id={cable.id}")
    cable.update(data)
    return nb.dcim.cables.get(id=cable.id)
+
def process_switch(d: UnifiDevice, db: Db, nb: NetboxCache, site, vrf):
    # Stage NetBox objects describing one UniFi switch. Only the
    # LLDP-derived cables are staged for now; the device/interface/IP/MAC
    # staging below is kept commented out (work in progress). `site` and
    # `vrf` are only used by that commented-out section.
#    db.devices.append({
#        "name": d.name,
#        "device_type": nb.get_device_type("ubiquiti-us-8-150w").id,
#        "role": nb.get_device_role("switch").id,
#        "serial": d.serial,
#        "site_name": site,
#    })
#
#    db.interfaces.append({
#        "device": find_device(nb.nb, name=d.name),
#        "name": "switch0",
#        "type": "virtual",
#    })
#
#    db.ips.append({
#        "address": f"{d.ip}/32",
#        "is_primary": "true",
#        "vrf": vrf.id,
#        "assigned_object_id": find_interface(nb.nb, device=d.name, name="switch0"),
#        "assigned_object_type": "dcim.interface",
##        "is_primary": "true" TODO: does not work
#    })
#
#    db.macs.append({
#        "mac_address": d.mac,
#        "assigned_object_id": find_interface(nb.nb, device=d.name, name="switch0"),
#        "assigned_object_type": "dcim.interface",
##        "is_primary": "true" TODO: does not work
#    })

    # Debug aid: show the LLDP neighbours reported by the controller.
    pprint(d.lldp_info)

    for e in d.lldp_info:
        # Local termination: the switch port the neighbour was seen on.
        a = [
            {
                "object_type": "dcim.interface",
                "object_id": find_interface(nb.nb, device=d.name, name=f"Port {e.local_port_idx} (PoE)"),
            }
        ]
        # Remote termination: the neighbour interface, found by chassis MAC.
        b = [
            {
                "object_type": "dcim.interface",
                "object_id": find_interface_by_mac(nb.nb, e.chassis_id),
            }
        ]

        # Order the two terminations by MAC so repeated runs stage each
        # cable with a stable A/B orientation — presumably so the upsert
        # lookup in create_or_update_cable matches; TODO confirm.
        if e.chassis_id > d.mac:
            a, b = b, a

        db.cables.append({
            "a_terminations": a,
            "b_terminations": b,
            "status": "connected",
        })
+
def sync_db(db: Db, nb):
    """Resolve all deferred Query values, then push staged objects to NetBox.

    Cables are pushed last and individually guarded: a cable whose endpoint
    cannot be found is skipped rather than aborting the whole sync.
    """

    def resolve_query(value):
        # Scalars pass through untouched.
        if value is None or isinstance(value, (str, int)):
            return value
        if isinstance(value, Query):
            return value.run(nb)
        if isinstance(value, dict):
            for key in value:
                value[key] = resolve_query(value[key])
            return value
        if isinstance(value, list):
            value[:] = [resolve_query(item) for item in value]
            return value
        raise Exception(f"unsupported type: {value}")

    for staged_device in db.devices:
        create_or_update_device(nb, resolve_query(staged_device))

    for staged_iface in db.interfaces:
        create_or_update_interface(nb, resolve_query(staged_iface))

    for staged_mac in db.macs:
        create_or_update_mac_address(nb, resolve_query(staged_mac))

    for staged_ip in db.ips:
        create_or_update_ip_address(nb, resolve_query(staged_ip))

    for staged_cable in db.cables:
        try:
            resolved = resolve_query(staged_cable)
            pprint(resolved)
            create_or_update_cable(nb, resolved)
        except NotFoundException:
            print("Cable failed, could not find endpoint")
            continue
+
def main():
    """Entry point: mirror UniFi controller switch data into NetBox.

    All connection parameters come from environment variables:
    UNIFI_URL/USERNAME/PASSWORD/SITE and NETBOX_URL/TOKEN/VRF/SITE.
    """
    unifi_url = os.getenv("UNIFI_URL")
    unifi_username = os.getenv("UNIFI_USERNAME")
    unifi_password = os.getenv("UNIFI_PASSWORD")
    unifi_site = os.getenv("UNIFI_SITE")

    netbox_url = os.getenv("NETBOX_URL")
    netbox_token = os.getenv("NETBOX_TOKEN")
    netbox_vrf_name = os.getenv("NETBOX_VRF")
    netbox_site_name = os.getenv("NETBOX_SITE")

    controller = controller_login(unifi_url, unifi_username, unifi_password)

    (nb, netbox_site, netbox_vrf) = netbox_login(netbox_url, netbox_token, netbox_site_name, netbox_vrf_name)
    status = nb.status()
    print(f"NetBox status: {status}")

    nb_cache = NetboxCache(nb)
    db = Db()

    # Only Ubiquiti US-8-150W switches are handled for now.
    for d in collect_devices(controller, unifi_site):
        if d.model == "US8P150":
            process_switch(d, db, nb_cache, netbox_site, netbox_vrf)

    sync_db(db, nb)
+
def controller_login(url, username, password) -> UnifiController:
    """Log in to the UniFi controller and verify the session works."""
    session = UnifiController(
        controller_url=url,
        username=username,
        password=password,
        is_udm_pro=False,
        verify_ssl=True,
    )

    # Just to check that there is a valid authentication
    session.get_unifi_site(include_health=False, raw=False)

    return session
+
def collect_devices(controller: UnifiController, site_name: str) -> list[UnifiDevice]:
    """Fetch detailed device records for a UniFi site.

    BUG FIX: on error this previously printed a message and implicitly
    returned None — the caller iterates the result, so that crashed with a
    TypeError. It now returns an empty list on failure (best-effort kept).
    """
    try:
        return controller.get_unifi_site_device(site_name=site_name, detailed=True, raw=False)
    except UnifiAPIError as e:
        print(f"Error fetching device information: {e}")
    except Exception as e:
        print(f"An unexpected error occurred: {e}")
    return []
+
def netbox_login(url: str, token: str, site_name: str, vrf_name: str):
    """Connect to NetBox and resolve the target site and optional VRF.

    Returns (api, site, vrf); vrf is None when vrf_name is unset. Exits the
    process when the site or VRF cannot be found.

    BUG FIX: the return annotation previously claimed a bare
    pynetbox Api although a 3-tuple is returned; the unused local
    `vrf_id` is also gone.
    """
    nb = pynetbox.api(url, token=token)

    # Accept either the human-readable name or the slug for the site.
    site = nb.dcim.sites.get(name=site_name)
    if site is None:
        site = nb.dcim.sites.get(slug=site_name)
    if site is None:
        print(f"Could not look up site by name or slug: {site_name}")
        exit(1)
    print(f"NetBox site {site.name}")

    vrf = None
    if vrf_name is not None:
        vrf = nb.ipam.vrfs.get(site=site, name=vrf_name)
        if vrf is None:
            print(f"Could not look up VRF by slug: {vrf_name}")
            exit(1)

    return nb, site, vrf
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible/netbox/uv.lock b/ansible/netbox/uv.lock
new file mode 100644
index 0000000..96a71dd
--- /dev/null
+++ b/ansible/netbox/uv.lock
@@ -0,0 +1,129 @@
+version = 1
+revision = 2
+requires-python = ">=3.12"
+
+[[package]]
+name = "certifi"
+version = "2025.4.26"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/e8/9e/c05b3920a3b7d20d3d3310465f50348e5b3694f4f88c6daf736eef3024c4/certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6", size = 160705, upload-time = "2025-04-26T02:12:29.51Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/4a/7e/3db2bd1b1f9e95f7cddca6d6e75e2f2bd9f51b1246e546d88addca0106bd/certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3", size = 159618, upload-time = "2025-04-26T02:12:27.662Z" },
+]
+
+[[package]]
+name = "charset-normalizer"
+version = "3.4.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/e4/33/89c2ced2b67d1c2a61c19c6751aa8902d46ce3dacb23600a283619f5a12d/charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63", size = 126367, upload-time = "2025-05-02T08:34:42.01Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/d7/a4/37f4d6035c89cac7930395a35cc0f1b872e652eaafb76a6075943754f095/charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7", size = 199936, upload-time = "2025-05-02T08:32:33.712Z" },
+ { url = "https://files.pythonhosted.org/packages/ee/8a/1a5e33b73e0d9287274f899d967907cd0bf9c343e651755d9307e0dbf2b3/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3", size = 143790, upload-time = "2025-05-02T08:32:35.768Z" },
+ { url = "https://files.pythonhosted.org/packages/66/52/59521f1d8e6ab1482164fa21409c5ef44da3e9f653c13ba71becdd98dec3/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a", size = 153924, upload-time = "2025-05-02T08:32:37.284Z" },
+ { url = "https://files.pythonhosted.org/packages/86/2d/fb55fdf41964ec782febbf33cb64be480a6b8f16ded2dbe8db27a405c09f/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214", size = 146626, upload-time = "2025-05-02T08:32:38.803Z" },
+ { url = "https://files.pythonhosted.org/packages/8c/73/6ede2ec59bce19b3edf4209d70004253ec5f4e319f9a2e3f2f15601ed5f7/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a", size = 148567, upload-time = "2025-05-02T08:32:40.251Z" },
+ { url = "https://files.pythonhosted.org/packages/09/14/957d03c6dc343c04904530b6bef4e5efae5ec7d7990a7cbb868e4595ee30/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd", size = 150957, upload-time = "2025-05-02T08:32:41.705Z" },
+ { url = "https://files.pythonhosted.org/packages/0d/c8/8174d0e5c10ccebdcb1b53cc959591c4c722a3ad92461a273e86b9f5a302/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981", size = 145408, upload-time = "2025-05-02T08:32:43.709Z" },
+ { url = "https://files.pythonhosted.org/packages/58/aa/8904b84bc8084ac19dc52feb4f5952c6df03ffb460a887b42615ee1382e8/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c", size = 153399, upload-time = "2025-05-02T08:32:46.197Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/26/89ee1f0e264d201cb65cf054aca6038c03b1a0c6b4ae998070392a3ce605/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b", size = 156815, upload-time = "2025-05-02T08:32:48.105Z" },
+ { url = "https://files.pythonhosted.org/packages/fd/07/68e95b4b345bad3dbbd3a8681737b4338ff2c9df29856a6d6d23ac4c73cb/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d", size = 154537, upload-time = "2025-05-02T08:32:49.719Z" },
+ { url = "https://files.pythonhosted.org/packages/77/1a/5eefc0ce04affb98af07bc05f3bac9094513c0e23b0562d64af46a06aae4/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f", size = 149565, upload-time = "2025-05-02T08:32:51.404Z" },
+ { url = "https://files.pythonhosted.org/packages/37/a0/2410e5e6032a174c95e0806b1a6585eb21e12f445ebe239fac441995226a/charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c", size = 98357, upload-time = "2025-05-02T08:32:53.079Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/4f/c02d5c493967af3eda9c771ad4d2bbc8df6f99ddbeb37ceea6e8716a32bc/charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e", size = 105776, upload-time = "2025-05-02T08:32:54.573Z" },
+ { url = "https://files.pythonhosted.org/packages/ea/12/a93df3366ed32db1d907d7593a94f1fe6293903e3e92967bebd6950ed12c/charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0", size = 199622, upload-time = "2025-05-02T08:32:56.363Z" },
+ { url = "https://files.pythonhosted.org/packages/04/93/bf204e6f344c39d9937d3c13c8cd5bbfc266472e51fc8c07cb7f64fcd2de/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf", size = 143435, upload-time = "2025-05-02T08:32:58.551Z" },
+ { url = "https://files.pythonhosted.org/packages/22/2a/ea8a2095b0bafa6c5b5a55ffdc2f924455233ee7b91c69b7edfcc9e02284/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e", size = 153653, upload-time = "2025-05-02T08:33:00.342Z" },
+ { url = "https://files.pythonhosted.org/packages/b6/57/1b090ff183d13cef485dfbe272e2fe57622a76694061353c59da52c9a659/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1", size = 146231, upload-time = "2025-05-02T08:33:02.081Z" },
+ { url = "https://files.pythonhosted.org/packages/e2/28/ffc026b26f441fc67bd21ab7f03b313ab3fe46714a14b516f931abe1a2d8/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c", size = 148243, upload-time = "2025-05-02T08:33:04.063Z" },
+ { url = "https://files.pythonhosted.org/packages/c0/0f/9abe9bd191629c33e69e47c6ef45ef99773320e9ad8e9cb08b8ab4a8d4cb/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691", size = 150442, upload-time = "2025-05-02T08:33:06.418Z" },
+ { url = "https://files.pythonhosted.org/packages/67/7c/a123bbcedca91d5916c056407f89a7f5e8fdfce12ba825d7d6b9954a1a3c/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0", size = 145147, upload-time = "2025-05-02T08:33:08.183Z" },
+ { url = "https://files.pythonhosted.org/packages/ec/fe/1ac556fa4899d967b83e9893788e86b6af4d83e4726511eaaad035e36595/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b", size = 153057, upload-time = "2025-05-02T08:33:09.986Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/ff/acfc0b0a70b19e3e54febdd5301a98b72fa07635e56f24f60502e954c461/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff", size = 156454, upload-time = "2025-05-02T08:33:11.814Z" },
+ { url = "https://files.pythonhosted.org/packages/92/08/95b458ce9c740d0645feb0e96cea1f5ec946ea9c580a94adfe0b617f3573/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b", size = 154174, upload-time = "2025-05-02T08:33:13.707Z" },
+ { url = "https://files.pythonhosted.org/packages/78/be/8392efc43487ac051eee6c36d5fbd63032d78f7728cb37aebcc98191f1ff/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148", size = 149166, upload-time = "2025-05-02T08:33:15.458Z" },
+ { url = "https://files.pythonhosted.org/packages/44/96/392abd49b094d30b91d9fbda6a69519e95802250b777841cf3bda8fe136c/charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7", size = 98064, upload-time = "2025-05-02T08:33:17.06Z" },
+ { url = "https://files.pythonhosted.org/packages/e9/b0/0200da600134e001d91851ddc797809e2fe0ea72de90e09bec5a2fbdaccb/charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980", size = 105641, upload-time = "2025-05-02T08:33:18.753Z" },
+ { url = "https://files.pythonhosted.org/packages/20/94/c5790835a017658cbfabd07f3bfb549140c3ac458cfc196323996b10095a/charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0", size = 52626, upload-time = "2025-05-02T08:34:40.053Z" },
+]
+
+[[package]]
+name = "idna"
+version = "3.10"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload-time = "2024-09-15T18:07:37.964Z" },
+]
+
+[[package]]
+name = "netbox"
+version = "0.1.0"
+source = { virtual = "." }
+dependencies = [
+ { name = "pynetbox" },
+ { name = "unifi-controller-api" },
+]
+
+[package.metadata]
+requires-dist = [
+ { name = "pynetbox" },
+ { name = "unifi-controller-api" },
+]
+
+[[package]]
+name = "packaging"
+version = "25.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" },
+]
+
+[[package]]
+name = "pynetbox"
+version = "7.5.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "packaging" },
+ { name = "requests" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/d3/0b/695021a23c373991d07c1e4cb510287a521318cfc4b29f68ebbecb19fcd2/pynetbox-7.5.0.tar.gz", hash = "sha256:780064c800fb8c079c9828df472203146442ed3dd0b522a28a501204eb00c066", size = 73850, upload-time = "2025-05-20T16:03:03.831Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/44/b7/a24bc58f0e27f0cd847bf14ffbe9722604f5abe3ec9c12dd8f89cb965be8/pynetbox-7.5.0-py3-none-any.whl", hash = "sha256:ab755a0020c0abb09b4d24c8f8ba89df26f04fa56c35de73302e29a39352f031", size = 35808, upload-time = "2025-05-20T16:03:02.445Z" },
+]
+
+[[package]]
+name = "requests"
+version = "2.32.3"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "certifi" },
+ { name = "charset-normalizer" },
+ { name = "idna" },
+ { name = "urllib3" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218, upload-time = "2024-05-29T15:37:49.536Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928, upload-time = "2024-05-29T15:37:47.027Z" },
+]
+
+[[package]]
+name = "unifi-controller-api"
+version = "0.3.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "requests" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/a6/62/0e12da83245655872fed6fdfea66fa05b36b76dd31994a8dc17fafe164c8/unifi_controller_api-0.3.0.tar.gz", hash = "sha256:a5ebaf0e739b825921ed3b94c80b0113cad6e295539397653571ad2286c81287", size = 54194, upload-time = "2025-04-14T18:57:24.028Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/33/32/38c5483b2a8dc57d3d76c3dcffbe3a47b1dfc671753ee2b62bfb529683a6/unifi_controller_api-0.3.0-py3-none-any.whl", hash = "sha256:b312aab9b460ee5d5189d704b7855d03452bfe0649cc569d8ce20c4417c75d71", size = 59134, upload-time = "2025-04-14T18:57:22.674Z" },
+]
+
+[[package]]
+name = "urllib3"
+version = "2.4.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/8a/78/16493d9c386d8e60e442a35feac5e00f0913c0f4b7c217c11e8ec2ff53e0/urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466", size = 390672, upload-time = "2025-04-10T15:23:39.232Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/6b/11/cc635220681e93a0183390e26485430ca2c7b5f9d33b15c74c2861cb8091/urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813", size = 128680, upload-time = "2025-04-10T15:23:37.377Z" },
+]
diff --git a/ansible/plays/danneri.yml b/ansible/plays/danneri.yml
new file mode 100644
index 0000000..6b4265a
--- /dev/null
+++ b/ansible/plays/danneri.yml
@@ -0,0 +1,27 @@
+- hosts:
+ - danneri
+ tasks:
+ - import_role:
+ name: systemd-networkd
+
+ - meta: flush_handlers
+
+ - become: yes
+ apt:
+ name:
+ - etckeeper
+ - import_role:
+ name: timezone
+
+ - become: yes
+ tags: k3s
+ copy:
+ dest: /etc/rancher/k3s/config.yaml
+ content: |
+ tls-san:
+ - "danneri.dn42.trygvis.io"
+ - "{{ ipam6.networks.lhn2_dn42.hosts.danneri }}"
+ - "2a06:2240:f00d:b500:9422:d355:95b7:f170"
+ cluster-cidr: "{{ ipam6.networks.danneri_cluster.range }}"
+ service-cidr: "{{ ipam6.networks.danneri_service.range }}"
+
diff --git a/ansible/plays/files/zigbee2mqtt/garasjepi/configuration.yaml b/ansible/plays/files/zigbee2mqtt/garasjepi/configuration.yaml
new file mode 100644
index 0000000..b0b8f5a
--- /dev/null
+++ b/ansible/plays/files/zigbee2mqtt/garasjepi/configuration.yaml
@@ -0,0 +1,43 @@
+homeassistant:
+ enabled: false
+frontend:
+ enabled: true
+mqtt:
+ base_topic: zigbee2mqtt
+ #server: mqtt://ha-kv24.dn42.trygvis.io
+ server: mqtt://192.168.10.159
+ user: z2m
+ password: zigbee2mqtt
+ version: 5
+serial:
+ port: /dev/ttyACM0
+ adapter: deconz
+advanced:
+ network_key:
+ - 107
+ - 142
+ - 62
+ - 67
+ - 150
+ - 226
+ - 182
+ - 69
+ - 47
+ - 194
+ - 244
+ - 95
+ - 73
+ - 125
+ - 135
+ - 61
+ pan_id: 43701
+ ext_pan_id:
+ - 181
+ - 224
+ - 197
+ - 29
+ - 98
+ - 176
+ - 205
+ - 241
+version: 4 \ No newline at end of file
diff --git a/ansible/plays/host-garasjepi.yml b/ansible/plays/host-garasjepi.yml
new file mode 100644
index 0000000..62b0b3d
--- /dev/null
+++ b/ansible/plays/host-garasjepi.yml
@@ -0,0 +1,28 @@
+- hosts:
+ - garasjepi
+ tasks:
+ # These first
+ - become: yes
+ apt:
+ name:
+ - etckeeper
+ - git
+
+ - become: yes
+ apt:
+ name:
+ - tmux
+ - aptitude
+
+# - import_role:
+# name: timezone
+#
+# - import_role:
+# name: systemd-networkd
+
+ - become: yes
+ apt:
+ name:
+ - docker.io
+ - tmux
+ - aptitude
diff --git a/ansible/plays/host-hash.yml b/ansible/plays/host-hash.yml
new file mode 100644
index 0000000..62b781f
--- /dev/null
+++ b/ansible/plays/host-hash.yml
@@ -0,0 +1,10 @@
+- hosts: hash
+ tasks:
+ - become: yes
+ copy:
+ dest: /etc/docker/daemon.json
+ content: |
+ {
+ "ipv6": true,
+ "fixed-cidr-v6": "{{ ipam6.networks.hash_docker_dn42.range }}"
+ }
diff --git a/ansible/plays/host-lhn2pi.yml b/ansible/plays/host-lhn2pi.yml
new file mode 100644
index 0000000..551c3dd
--- /dev/null
+++ b/ansible/plays/host-lhn2pi.yml
@@ -0,0 +1,6 @@
+- hosts:
+ - lhn2pi
+ roles:
+ - systemd-networkd
+ - prometheus-bird-exporter
+ - prometheus-node-exporter
diff --git a/ansible/plays/host-unifi.yml b/ansible/plays/host-unifi.yml
new file mode 100644
index 0000000..41fb292
--- /dev/null
+++ b/ansible/plays/host-unifi.yml
@@ -0,0 +1,18 @@
+- hosts:
+ - unifi
+ tasks:
+ - become: yes
+ apt:
+ name:
+ - etckeeper
+
+ - import_role:
+ name: timezone
+
+ - import_role:
+ name: systemd-networkd
+
+ - become: yes
+ apt:
+ name:
+ - docker.io
diff --git a/ansible/plays/ipam-generate-dns.yml b/ansible/plays/ipam-generate-dns.yml
new file mode 100644
index 0000000..25f8087
--- /dev/null
+++ b/ansible/plays/ipam-generate-dns.yml
@@ -0,0 +1,34 @@
- hosts: localhost
  gather_facts: no
  connection: local
  vars_files:
    - ../group_vars/all/ipam.yml
  tasks:
    # Flatten ipam6.networks into a host list, then render one Linode AAAA
    # record per host, sorted by name so the generated file is stable.
    - set_fact:
        content: |
          {% set hosts = [] %}
          {% for nw_name, network in ipam6.networks.items() %}
          {% for host, address in (network.hosts|default({})).items() %}
          {{- hosts.append({'name': host, 'address': address}) -}}
          {% endfor %}
          {% endfor %}
          # Generated from ansible data
          {% for h in hosts|sort(attribute='name') %}

          resource "linode_domain_record" "dn42-{{ h.name }}" {
            domain_id = linode_domain.root.id
            name = "{{ h.name }}.dn42"
            record_type = "AAAA"
            target = "{{ h.address|ansible.utils.ipv6('address') }}"
          }
          {% endfor %}
    - debug:
        msg: "{{ content }}"
      when: false
    - name: Generating ../../terraform/dns/dn42.tf
      register: tf
      copy:
        dest: ../../terraform/dns/dn42.tf
        content: "{{ content }}"
    # BUG FIX: format the file this play just generated; previously this
    # pointed at ../../terraform/ipam6/ipam6.tf.
    - shell: terraform fmt ../../terraform/dns/dn42.tf
      when: "tf.changed"
diff --git a/ansible/plays/ipam-generate-tf.yml b/ansible/plays/ipam-generate-tf.yml
new file mode 100644
index 0000000..d9888b4
--- /dev/null
+++ b/ansible/plays/ipam-generate-tf.yml
@@ -0,0 +1,55 @@
+- hosts: localhost
+ gather_facts: no
+ connection: local
+ vars_files:
+ - ../group_vars/all/ipam.yml
+ collections:
+ - ansible.utils
+ tasks:
+ - name: Generate terraform/ipam6/ipam6.tf
+ register: tf
+ copy:
+ dest: ../../terraform/ipam6/ipam6.tf
+ content: |
+ output "networks" {
+ value = {
+ {% for name, network in ipam6.networks.items() %}
+ {% if not (network.range | ansible.utils.ipv6) %}
+ Invalid network: {{ network.range }}
+ {% endif %}
+ {{ name }} = {
+ {% if network.description|default("") %}
+ description = "{{ network.description }}"
+ {% endif %}
+ range = "{{ network.range }}"
+ address = "{{ network.range|ansible.utils.ipaddr("network") }}"
+ prefix = "{{ network.range|ansible.utils.ipaddr("prefix") }}"
+ {% set hosts = network.hosts|default({}) %}
+ hosts = {
+ {% for name, addr in hosts.items() %}
+ {{ name }} = {
+ address: "{{ addr|ansible.utils.ipaddr("address") }}"
+ prefix: "{{ addr|ansible.utils.ipaddr("prefix") }}"
+ }
+ {% endfor %}
+ }
+ }
+ {% endfor %}
+ }
+ }
+
+ output "hosts" {
+ value = {
+ {% for name, network in ipam6.networks.items() %}
+ {% set hosts = network.hosts|default({}) %}
+ {% for name, addr in hosts.items() %}
+ {{ name }} = {
+ address: "{{ addr|ansible.utils.ipaddr("address") }}"
+ prefix: "{{ addr|ansible.utils.ipaddr("prefix") }}"
+ }
+ {% endfor %}
+ {% endfor %}
+ }
+ }
+ - shell: terraform fmt ../../terraform/ipam6/ipam6.tf
+ when: "tf.changed"
diff --git a/ansible/plays/kjell-ct-102.yml b/ansible/plays/kjell-ct-102.yml
new file mode 100644
index 0000000..87b9459
--- /dev/null
+++ b/ansible/plays/kjell-ct-102.yml
@@ -0,0 +1,49 @@
+- hosts:
+ - kjell-ct-102
+ vars:
+ traefik_version: 3.4.1
+ traefik_checksum: md5:f299230ea9f247a672b187a79f2e76e6719ccbee
+ traefik_template: traefik-proxy.toml.j2
+ tasks:
+ - become: yes
+ apt:
+ name:
+ - etckeeper
+ - sudo
+ tags: packages,never
+
+ - import_role:
+ name: timezone
+ tags: timezone,never
+
+ - name: Load values from sops.yml
+ community.sops.load_vars:
+ name: env
+ file: ../../sops.yml
+ tags: traefik-server,never
+
+ - import_role:
+ name: traefik-server
+ vars:
+ traefik_environment:
+ LINODE_TOKEN: "{{ env.linode_token }}"
+ tags: traefik-server,never
+
+ - name: /etc/traefik/traefik.toml
+ become: true
+ template:
+ src: "{{ traefik_template }}"
+ dest: /etc/traefik/traefik.toml
+ owner: root
+ group: root
+ mode: 0644
+ register: template
+
+ - name: systemctl restart traefik
+ become: true
+ systemd:
+ daemon_reload: true
+ unit: traefik
+ enabled: true
+ state: restarted
+ when: template.changed
diff --git a/ansible/plays/otelcol-contrib.yml b/ansible/plays/otelcol-contrib.yml
new file mode 100644
index 0000000..f667337
--- /dev/null
+++ b/ansible/plays/otelcol-contrib.yml
@@ -0,0 +1,29 @@
+- hosts:
+ - coregonus
+ - hash
+ - knot
+ tasks:
+
+ # otelcol-contrib.deb needs to be installed first
+ # adduser otelcol-contrib systemd-journal
+
+ - name: /etc/otelcol-contrib/config.yaml
+ become: yes
+ template:
+ src: otelcol-contrib/config.yaml
+ dest: /etc/otelcol-contrib/config.yaml
+ notify: systemctl restart otelcol-contrib
+
+ - name: mkdir /var/lib/otelcol/file_storage
+ become: yes
+ file:
+ path: /var/lib/otelcol/file_storage
+ owner: otelcol-contrib
+ notify: systemctl restart otelcol-contrib
+
+ handlers:
+ - name: systemctl restart otelcol-contrib
+ become: yes
+ systemd:
+ service: otelcol-contrib
+ state: restarted
diff --git a/ansible/plays/roa-server.yml b/ansible/plays/roa-server.yml
new file mode 100644
index 0000000..c662640
--- /dev/null
+++ b/ansible/plays/roa-server.yml
@@ -0,0 +1,25 @@
+- hosts:
+ - hash
+ tasks:
+ - name: mkdir /etc/docker-service/roa-server
+ become: true
+ file:
+ path: /etc/docker-service/roa-server
+ state: directory
+ mode: 0700
+ - name: Install /etc/docker-service/roa-server/private.pem
+ become: true
+ copy:
+ dest: /etc/docker-service/roa-server/private.pem
+ content: "{{ roa_server.private }}"
+ owner: root
+ group: root
+ mode: 0444
+
+ - import_role:
+ name: docker-service
+ vars:
+ service: roa-server
+ template: templates/roa-server/docker-compose.yml
+# systemd_enabled: no
+# systemd_state: stopped
diff --git a/ansible/plays/templates/danneri/systemd-networkd/enp1s0.network b/ansible/plays/templates/danneri/systemd-networkd/enp1s0.network
new file mode 100644
index 0000000..b38116c
--- /dev/null
+++ b/ansible/plays/templates/danneri/systemd-networkd/enp1s0.network
@@ -0,0 +1,8 @@
+[Match]
+Name=enp1s0
+
+[Network]
+DHCP=ipv4
+
+[IPv6AcceptRA]
+Token=static:{{ ipam6.networks.lhn2_dn42.hosts.danneri }}
diff --git a/ansible/plays/templates/lhn2pi/systemd/network/10-eth0.network b/ansible/plays/templates/lhn2pi/systemd/network/10-eth0.network
new file mode 100644
index 0000000..853556d
--- /dev/null
+++ b/ansible/plays/templates/lhn2pi/systemd/network/10-eth0.network
@@ -0,0 +1,11 @@
+[Match]
+Name=eth0
+
+[Network]
+DHCP=ipv4
+Address={{ ipam6.networks.lhn2_dn42.hosts.lhn2pi }}
+# IPv6Forwarding=yes # needs newer systemd
+
+# Disables the automatic activation of DHCPv6 from RA packets
+[IPv6AcceptRA]
+DHCPv6Client=no
diff --git a/ansible/plays/templates/otelcol-contrib/config.yaml b/ansible/plays/templates/otelcol-contrib/config.yaml
new file mode 100644
index 0000000..671dbaa
--- /dev/null
+++ b/ansible/plays/templates/otelcol-contrib/config.yaml
@@ -0,0 +1,65 @@
+receivers:
+ journald:
+ priority: debug
+
+exporters:
+ debug:
+ verbosity: detailed
+
+ nop:
+
+ otlphttp/hash:
+ endpoint: https://loki.trygvis.io/otlp
+
+extensions:
+ file_storage/journald:
+
+processors:
+ batch: {}
+
+ transform/severity_parse:
+ log_statements:
+ - context: log
+ statements:
+ - set(resource.attributes["service.namespace"], "systemd")
+ - set(resource.attributes["service.name"], body["_SYSTEMD_UNIT"])
+ - set(resource.attributes["systemd_unit"], body["_SYSTEMD_UNIT"])
+ - set(resource.attributes["systemd_slice"], body["_SYSTEMD_SLICE"])
+ - set(resource.attributes["node"], body["_HOSTNAME"])
+
+ - set(severity_number, SEVERITY_NUMBER_TRACE) where body["PRIORITY"] == "7"
+ - set(severity_text, "debug") where body["PRIORITY"] == "7"
+ - set(severity_number, SEVERITY_NUMBER_DEBUG) where body["PRIORITY"] == "6"
+ - set(severity_text, "info") where body["PRIORITY"] == "6"
+ - set(severity_number, SEVERITY_NUMBER_INFO) where body["PRIORITY"] == "5"
+ - set(severity_text, "notice") where body["PRIORITY"] == "5"
+ - set(severity_number, SEVERITY_NUMBER_WARN) where body["PRIORITY"] == "4"
+ - set(severity_text, "warning") where body["PRIORITY"] == "4"
+ - set(severity_number, SEVERITY_NUMBER_ERROR) where body["PRIORITY"] == "3"
+ - set(severity_text, "err") where body["PRIORITY"] == "3"
+ - set(severity_number, SEVERITY_NUMBER_FATAL) where body["PRIORITY"] == "2"
+ - set(severity_text, "crit") where body["PRIORITY"] == "2"
+ - set(severity_number, SEVERITY_NUMBER_FATAL) where body["PRIORITY"] == "1"
+ - set(severity_text, "alert") where body["PRIORITY"] == "1"
+ - set(severity_number, SEVERITY_NUMBER_FATAL) where body["PRIORITY"] == "0"
+ - set(severity_text, "emerg") where body["PRIORITY"] == "0"
+
+ - set(body, body["MESSAGE"])
+
+service:
+# telemetry:
+# logs:
+# level: debug
+ extensions:
+ - file_storage/journald
+ pipelines:
+ logs:
+ receivers:
+ - journald
+ processors:
+ - transform/severity_parse
+ - batch
+ exporters:
+# - debug
+# - nop
+ - otlphttp/hash
diff --git a/ansible/plays/templates/roa-server/docker-compose.yml b/ansible/plays/templates/roa-server/docker-compose.yml
new file mode 100644
index 0000000..c11933c
--- /dev/null
+++ b/ansible/plays/templates/roa-server/docker-compose.yml
@@ -0,0 +1,14 @@
+version: "3"
+services:
+ stayrtr:
+ image: rpki/stayrtr:latest # no tagged images are available :(
+ volumes:
+ - /etc/docker-service/roa-server/id_ecdsa:/id_ecdsa:ro
+ ports:
+ - 8022:8022
+ command:
+ - -bind=
+ - -ssh.bind=:8022
+ - -ssh.key=/id_ecdsa
+ - -checktime=false
+ - -cache=https://dn42.burble.com/roa/dn42_roa_46.json
diff --git a/ansible/plays/templates/traefik-proxy.toml.j2 b/ansible/plays/templates/traefik-proxy.toml.j2
new file mode 100644
index 0000000..d538664
--- /dev/null
+++ b/ansible/plays/templates/traefik-proxy.toml.j2
@@ -0,0 +1,126 @@
+[global]
+ checkNewVersion = true
+ sendAnonymousUsage = false
+
+################################################################
+# Entrypoints configuration
+################################################################
+
+[entryPoints]
+# [entryPoints.web]
+# address = ":80"
+
+ [entryPoints.websecure]
+ address = ":443"
+
+ [entryPoints.websecure.http.tls]
+ certResolver = "linode"
+
+[log]
+
+ # Log level
+ #
+ # Optional
+ # Default: "ERROR"
+ #
+ level = "DEBUG"
+
+ # Sets the filepath for the traefik log. If not specified, stdout will be used.
+ # Intermediate directories are created if necessary.
+ #
+ # Optional
+ # Default: os.Stdout
+ #
+ # filePath = "log/traefik.log"
+
+ # Format is either "json" or "common".
+ #
+ # Optional
+ # Default: "common"
+ #
+ # format = "json"
+
+################################################################
+# Access logs configuration
+################################################################
+
+# Enable access logs
+# By default it will write to stdout and produce logs in the textual
+# Common Log Format (CLF), extended with additional fields.
+#
+# Optional
+#
+# [accessLog]
+
+ # Sets the file path for the access log. If not specified, stdout will be used.
+ # Intermediate directories are created if necessary.
+ #
+ # Optional
+ # Default: os.Stdout
+ #
+ # filePath = "/path/to/log/log.txt"
+
+ # Format is either "json" or "common".
+ #
+ # Optional
+ # Default: "common"
+ #
+ # format = "json"
+
+################################################################
+# API and dashboard configuration
+################################################################
+
+# Enable API and dashboard
+[api]
+
+ # Enable the API in insecure mode
+ #
+ # Optional
+ # Default: false
+ #
+ # insecure = true
+
+ # Enabled Dashboard
+ #
+ # Optional
+ # Default: true
+ #
+ # dashboard = false
+
+################################################################
+# Ping configuration
+################################################################
+
+# Enable ping
+[ping]
+
+ # Name of the related entry point
+ #
+ # Optional
+ # Default: "traefik"
+ #
+ # entryPoint = "traefik"
+
+
+[certificatesResolvers.linode.acme]
+ email = "root@trygvis.io"
+ storage = "acme.json"
+ [certificatesResolvers.linode.acme.dnsChallenge]
+ provider = "linode"
+ delayBeforeCheck = 1
+
+[http]
+[http.routers]
+ [http.routers.junk]
+ rule = "Host(`junk.dn42.trygvis.io`)"
+ service = "netbox"
+
+ [http.routers.junk.tls]
+ certResolver = "linode"
+
+ [http.services]
+ # Define how to reach an existing service on our infrastructure
+ [http.services.netbox.loadBalancer]
+ [[http.services.netbox.loadBalancer.servers]]
+ url = "http://[fdb1:4242:3538:2005:be24:11ff:febb:5c7f]:8080"
diff --git a/ansible/plays/templates/unifi/systemd-networkd/enp1s0.network b/ansible/plays/templates/unifi/systemd-networkd/enp1s0.network
new file mode 100644
index 0000000..251bf45
--- /dev/null
+++ b/ansible/plays/templates/unifi/systemd-networkd/enp1s0.network
@@ -0,0 +1,8 @@
+[Match]
+Name=enp1s0
+
+[Network]
+DHCP=ipv4
+
+[IPv6AcceptRA]
+Token=static:{{ ipam6.networks.lhn2_dn42.hosts.unifi }}
diff --git a/ansible/plays/zigbee2mqtt-backup.yml b/ansible/plays/zigbee2mqtt-backup.yml
new file mode 100644
index 0000000..331045b
--- /dev/null
+++ b/ansible/plays/zigbee2mqtt-backup.yml
@@ -0,0 +1,13 @@
+- hosts:
+ - zigbee2mqtt
+ tasks:
+ - become: yes
+ register: config
+ shell:
+ cmd: |
+ mp=$(docker volume inspect zigbee2mqtt --format '{{"{{.Mountpoint}}"}}')
+ cat $mp/configuration.yaml
+ - copy:
+ content: "{{ config.stdout }}"
+ dest: files/zigbee2mqtt/{{ ansible_hostname }}/configuration.yaml
+ delegate_to: localhost
diff --git a/ansible/prometheus/deploy-config.yml b/ansible/prometheus/deploy-config.yml
new file mode 100644
index 0000000..472d05c
--- /dev/null
+++ b/ansible/prometheus/deploy-config.yml
@@ -0,0 +1,19 @@
+- hosts:
+ - conflatorio
+ tasks:
+ - become: yes
+ file:
+ path: /etc/docker-service/prometheus
+ state: directory
+ owner: root
+ group: root
+
+ - become: yes
+ notify: reload prometheus
+ copy:
+ dest: /etc/docker-service/prometheus/prometheus.yml
+ src: "{{ inventory_hostname }}/prometheus.yml"
+
+ handlers:
+ - name: reload prometheus
+ shell: docker kill --signal HUP prometheus
diff --git a/ansible/prometheus/files/conflatorio/prometheus.yml b/ansible/prometheus/files/conflatorio/prometheus.yml
new file mode 100644
index 0000000..9fc1316
--- /dev/null
+++ b/ansible/prometheus/files/conflatorio/prometheus.yml
@@ -0,0 +1,25 @@
+global:
+ scrape_interval: 15s
+ evaluation_interval: 15s
+
+rule_files:
+ # - "first.rules"
+ # - "second.rules"
+
+scrape_configs:
+ - job_name: prometheus
+ static_configs:
+ - targets: ['localhost:9090']
+
+ - job_name: node
+ static_configs:
+ - targets:
+ - "knot.vpn.trygvis.io:9100"
+ - "hash.vpn.trygvis.io:9323"
+ - "conflatorio.vpn.trygvis.io:9100"
+
+ - job_name: bird
+ static_configs:
+ - targets:
+ - "knot.vpn.trygvis.io:9324"
+ - "conflatorio.vpn.trygvis.io:9324"
diff --git a/ansible/roles/prometheus-bird-exporter/handlers/main.yml b/ansible/roles/prometheus-bird-exporter/handlers/main.yml
new file mode 100644
index 0000000..f4f9381
--- /dev/null
+++ b/ansible/roles/prometheus-bird-exporter/handlers/main.yml
@@ -0,0 +1,5 @@
+- name: restart
+ become: yes
+ systemd:
+ name: prometheus-bird-exporter
+ state: restarted
diff --git a/ansible/roles/prometheus-bird-exporter/tasks/main.yml b/ansible/roles/prometheus-bird-exporter/tasks/main.yml
new file mode 100644
index 0000000..6d8b999
--- /dev/null
+++ b/ansible/roles/prometheus-bird-exporter/tasks/main.yml
@@ -0,0 +1,18 @@
+- become: yes
+ package:
+ name: "{{ items }}"
+ state: present
+ vars:
+ items:
+ - prometheus-bird-exporter
+- name: /etc/default/prometheus-bird-exporter
+ become: yes
+ copy:
+ dest: /etc/default/prometheus-bird-exporter
+ content: |
+ # Set the command-line arguments to pass to the server.
+ # Due to shell escaping, to pass backslashes for regexes, you need to double
+ # them (\\d for \d). If running under systemd, you need to double them again
+ # (\\\\d to mean \d), and escape newlines too.
+ ARGS="-bird.v2 -format.new"
+ notify: restart
diff --git a/ansible/roles/prometheus-node-exporter/handlers/main.yml b/ansible/roles/prometheus-node-exporter/handlers/main.yml
new file mode 100644
index 0000000..f4f9381
--- /dev/null
+++ b/ansible/roles/prometheus-node-exporter/handlers/main.yml
@@ -0,0 +1,5 @@
+- name: restart
+ become: yes
+ systemd:
+ name: prometheus-node-exporter
+ state: restarted
diff --git a/ansible/roles/prometheus-node-exporter/tasks/main.yml b/ansible/roles/prometheus-node-exporter/tasks/main.yml
new file mode 100644
index 0000000..e7c6d18
--- /dev/null
+++ b/ansible/roles/prometheus-node-exporter/tasks/main.yml
@@ -0,0 +1,18 @@
+- become: yes
+ package:
+ name: "{{ items }}"
+ state: present
+ vars:
+ items:
+ - prometheus-node-exporter
+- name: /etc/default/prometheus-node-exporter
+ become: yes
+ copy:
+ dest: /etc/default/prometheus-node-exporter
+ content: |
+ # Set the command-line arguments to pass to the server.
+ # Due to shell escaping, to pass backslashes for regexes, you need to double
+ # them (\\d for \d). If running under systemd, you need to double them again
+ # (\\\\d to mean \d), and escape newlines too.
+ ARGS=""
+ notify: restart
diff --git a/ansible/roles/superusers/tasks/main.yml b/ansible/roles/superusers/tasks/main.yml
index c1f5a47..12672ec 100644
--- a/ansible/roles/superusers/tasks/main.yml
+++ b/ansible/roles/superusers/tasks/main.yml
@@ -16,7 +16,7 @@
unix_groups:
- sudo
- systemd-journal
- with_items: "{{ unix_groups }}"
+ with_items: "{{ unix_groups + (['docker'] if 'docker' in getent_group else []) }}"
loop_control:
loop_var: group
include_tasks: adjust-group.yml
diff --git a/ansible/roles/systemd-networkd/handlers/main.yml b/ansible/roles/systemd-networkd/handlers/main.yml
index 9656da4..c9b2603 100644
--- a/ansible/roles/systemd-networkd/handlers/main.yml
+++ b/ansible/roles/systemd-networkd/handlers/main.yml
@@ -1,4 +1,5 @@
-- name: restart
+- name: reload
+ become: yes
systemd:
name: systemd-networkd
- state: restarted
+ state: reloaded
diff --git a/ansible/roles/systemd-networkd/tasks/main.yml b/ansible/roles/systemd-networkd/tasks/main.yml
index 13c167b..aed4168 100644
--- a/ansible/roles/systemd-networkd/tasks/main.yml
+++ b/ansible/roles/systemd-networkd/tasks/main.yml
@@ -1,9 +1,18 @@
-- systemd:
+- become: yes
+ systemd:
name: systemd-networkd
state: started
enabled: yes
-- loop: "{{ systemd_networkd__files | default([]) }}"
- copy:
+- name: mkdir /etc/systemd/network
+ become: yes
+ file:
+ path: "/etc/systemd/network"
+ state: directory
+ owner: systemd-network
+ group: systemd-network
+- become: yes
+ loop: "{{ systemd_networkd__files | default([]) }}"
+ template:
src: "{{ item }}"
dest: "/etc/systemd/network/{{ item | basename }}"
- notify: restart
+ notify: reload
diff --git a/ansible/roles/traefik-server/handlers/main.yml b/ansible/roles/traefik-server/handlers/main.yml
new file mode 100644
index 0000000..6e34db4
--- /dev/null
+++ b/ansible/roles/traefik-server/handlers/main.yml
@@ -0,0 +1,5 @@
+- name: systemctl restart traefik
+ systemd:
+ daemon_reload: true
+ unit: traefik
+ state: restarted
diff --git a/ansible/roles/traefik-server/tasks/main.yml b/ansible/roles/traefik-server/tasks/main.yml
new file mode 100644
index 0000000..98d45e5
--- /dev/null
+++ b/ansible/roles/traefik-server/tasks/main.yml
@@ -0,0 +1,56 @@
+- name: Download traefik
+ become: true
+ ansible.builtin.get_url:
+ url: https://github.com/traefik/traefik/releases/download/v{{ traefik_version }}/traefik_v{{ traefik_version }}_linux_amd64.tar.gz
+ dest: /tmp/traefik-{{ traefik_version }}.tar.gz
+ checksum: "{{ traefik_checksum|default('') }}"
+ register: download
+
+- name: Download checksum
+ debug:
+ msg: download.checksum_src={{ download.checksum_src }}
+ when: download.status_code|default(0) == 200
+
+- name: mkdir /tmp/traefik-x.y.z
+ become: true
+ file:
+ path: /tmp/traefik-{{ traefik_version }}
+ state: directory
+
+- name: Extract traefik
+ become: true
+ unarchive:
+ remote_src: true
+ src: /tmp/traefik-{{ traefik_version }}.tar.gz
+ dest: /tmp/traefik-{{ traefik_version }}
+
+- name: Install traefik
+ become: true
+ copy:
+ remote_src: true
+ src: /tmp/traefik-{{ traefik_version }}/traefik
+ dest: /usr/local/bin/traefik
+ owner: root
+ group: root
+ mode: 0750
+
+- name: /etc/systemd/system/traefik.service
+ become: true
+ template:
+ src: traefik.service.j2
+ dest: /etc/systemd/system/traefik.service
+ owner: root
+ group: root
+ mode: 0644
+
+- name: mkdir /etc/traefik
+ become: true
+ file:
+ path: /etc/traefik
+ state: directory
+
+- name: mkdir /etc/traefik/acme
+ become: true
+ file:
+ path: /etc/traefik/acme
+ state: directory
diff --git a/ansible/roles/traefik-server/templates/traefik.service.j2 b/ansible/roles/traefik-server/templates/traefik.service.j2
new file mode 100644
index 0000000..14bc403
--- /dev/null
+++ b/ansible/roles/traefik-server/templates/traefik.service.j2
@@ -0,0 +1,52 @@
+[Unit]
+Description=traefik proxy
+After=network-online.target
+Wants=network-online.target systemd-networkd-wait-online.service
+
+AssertFileIsExecutable=/usr/local/bin/traefik
+AssertPathExists=/etc/traefik/traefik.toml
+
+[Service]
+Restart=on-abnormal
+
+#User=traefik
+#Group=traefik
+
+; Always set "-root" to something safe in case it gets forgotten in the traefikfile.
+ExecStart=/usr/local/bin/traefik --configfile=/etc/traefik/traefik.toml
+
+; Limit the number of file descriptors; see `man systemd.exec` for more limit settings.
+LimitNOFILE=1048576
+
+; Use private /tmp and /var/tmp, which are discarded after traefik stops.
+PrivateTmp=true
+
+; Use a minimal /dev (May bring additional security if switched to 'true')
+PrivateDevices=true
+
+; Hide /home, /root, and /run/user. Nobody will steal your SSH-keys.
+ProtectHome=true
+
+; Make /usr, /boot, /etc and possibly some more folders read-only.
+ProtectSystem=full
+
+; ... except /etc/ssl/traefik, because we want Letsencrypt-certificates there.
+; This merely retains r/w access rights, it does not add any new. Must still be writable on the host!
+#ReadWriteDirectories=/etc/traefik/acme
+
+; The following additional security directives only work with systemd v229 or later.
+; They further restrict privileges that can be gained by traefik. Uncomment if you like.
+; Note that you may have to add capabilities required by any plugins in use.
+CapabilityBoundingSet=CAP_NET_BIND_SERVICE
+AmbientCapabilities=CAP_NET_BIND_SERVICE
+NoNewPrivileges=true
+{% set env = (traefik_environment | default({})).items() %}
+{% if env %}
+
+{% for k, v in env %}
+Environment="{{ k }}={{ v }}"
+{% endfor %}
+{% endif %}
+
+[Install]
+WantedBy=multi-user.target
diff --git a/ansible/roles/unifi/handlers/main.yml b/ansible/roles/unifi/handlers/main.yml
deleted file mode 100644
index ce78323..0000000
--- a/ansible/roles/unifi/handlers/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-- name: update apt cache
- apt:
- update_cache: yes
diff --git a/ansible/roles/unifi/tasks/main.yml b/ansible/roles/unifi/tasks/main.yml
deleted file mode 100644
index 11c4c00..0000000
--- a/ansible/roles/unifi/tasks/main.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Ubiquiti APT key
- notify: update apt cache
- apt_key:
- id: 06E85760C0A52C50
- keyserver: keyserver.ubuntu.com
-
-- name: Ubiquiti APT repository
- notify: update apt cache
- copy:
- dest: /etc/apt/sources.list.d/unifi.list
- content: 'deb http://www.ubnt.com/downloads/unifi/debian stable ubiquiti'
-
-- meta: flush_handlers
-
-- name: packages
- apt:
- name: "{{ items }}"
- install_recommends: no
- vars:
- items:
- - openjdk-8-jre
- - unifi
diff --git a/ansible/unifi.yml b/ansible/unifi.yml
deleted file mode 100644
index d417a2a..0000000
--- a/ansible/unifi.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-- hosts:
- - unifi
- roles:
- - role: unifi
- tags: unifi
- become: yes
diff --git a/ansible/wg0/files/coregonus/etc/wireguard/public-wg0.key b/ansible/wg0/files/coregonus/etc/wireguard/public-wg0.key
new file mode 100644
index 0000000..16f44f9
--- /dev/null
+++ b/ansible/wg0/files/coregonus/etc/wireguard/public-wg0.key
@@ -0,0 +1 @@
+M1qJnHL6GD19On7y11uVF9m5J2noqspbfgZRmmEnwkc=
diff --git a/ansible/wg0/group_vars/all/wireguard_wg0.yml b/ansible/wg0/group_vars/all/wireguard_wg0.yml
index 3a8099c..109de3d 100644
--- a/ansible/wg0/group_vars/all/wireguard_wg0.yml
+++ b/ansible/wg0/group_vars/all/wireguard_wg0.yml
@@ -16,7 +16,7 @@ wireguard_wg0:
ipv4: 192.168.60.2
ipv6: fdf3:aad9:a885:0b3a::2
conflatorio:
- state: absent
+ state: present
ipv6: fdf3:aad9:a885:0b3a::3
arius:
state: present
@@ -33,7 +33,7 @@ wireguard_wg0:
state: present
ipv6: fdf3:aad9:a885:0b3a::9
astyanax:
- state: present
+ state: absent
ipv6: fdf3:aad9:a885:0b3a::10
allowed_ips:
- fdf3:aad9:a885:ba65::/64
@@ -66,3 +66,6 @@ wireguard_wg0:
biwia:
state: present
ipv6: fdf3:aad9:a885:0b3a::17
+ coregonus:
+ state: present
+ ipv6: fdf3:aad9:a885:0b3a::18
diff --git a/ansible/wg0/wireguard-wg0-terraform.yml b/ansible/wg0/wireguard-wg0-terraform.yml
index 33b4b47..c4c809c 100644
--- a/ansible/wg0/wireguard-wg0-terraform.yml
+++ b/ansible/wg0/wireguard-wg0-terraform.yml
@@ -7,11 +7,12 @@
content: |
# Generated from ansible data
{% for host, data in wireguard_wg0.hosts.items() %}
+ {% if data.state | default("present") == "present" %}
resource "linode_domain_record" "vpn-{{ host }}" {
domain_id = linode_domain.root.id
name = "{{ host }}.vpn"
record_type = "AAAA"
target = "{{ data.ipv6 }}"
}
+ {% endif %}
{% endfor %}
-
diff --git a/ansible/zigbee2mqtt.yml b/ansible/zigbee2mqtt.yml
new file mode 100644
index 0000000..3e8e55c
--- /dev/null
+++ b/ansible/zigbee2mqtt.yml
@@ -0,0 +1,7 @@
+- hosts:
+ - zigbee2mqtt
+ roles:
+ - role: docker
+ tags: docker
+ become: yes
+