07070100000000000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001000000000susemanager-sls   07070100000001000081B400000000000000000000000163F87E3000000023000000000000000000000000000000000000001B00000000susemanager-sls/.gitignore    *.cache*
*__pycache__*
*.pyc
*.pyo
 07070100000002000081B400000000000000000000000163F87E30000004CB000000000000000000000000000000000000002000000000susemanager-sls/Makefile.python   THIS_MAKEFILE := $(realpath $(lastword $(MAKEFILE_LIST)))
CURRENT_DIR := $(dir $(THIS_MAKEFILE))
include $(CURRENT_DIR)../../rel-eng/Makefile.python

# Docker tests variables
DOCKER_CONTAINER_BASE = devel/galaxy/manager/4.2/docker/containers/suma-4.2
DOCKER_REGISTRY       = registry.suse.de
# "$$" escapes the dollar sign so the *shell* expands PYTHONPATH at
# "docker run" time; a bare "$P" would be consumed by make as the (empty)
# make variable "P", yielding the literal string "PYTHONPATH=YTHONPATH".
DOCKER_RUN_EXPORT     = "PYTHONPATH=$$PYTHONPATH"
DOCKER_VOLUMES        = -v "$(CURDIR)/../../:/manager"

# Run pylint over every Python file and write the report to reports/pylint.log.
# "|| true" is deliberate: the log is the deliverable and lint findings must
# not fail the build.
# NOTE(review): assumes the reports/ directory already exists -- confirm.
__pylint ::
	$(call update_pip_env)
	pylint --rcfile=pylintrc $(shell find -name '*.py') > reports/pylint.log || true

# Run the unit tests under src/tests. "&&" (instead of ";") makes the recipe
# fail early if the directory is missing rather than running pytest in the
# wrong directory.
__pytest ::
	$(call update_pip_env)
	$(call install_pytest)
	cd src/tests && pytest --disable-warnings --tb=native --color=yes -v

# Run the __pylint target inside the SUSE Manager test container so the
# checked environment matches the product's Python stack.
docker_pylint ::
	docker run --rm -e $(DOCKER_RUN_EXPORT) $(DOCKER_VOLUMES) $(DOCKER_REGISTRY)/$(DOCKER_CONTAINER_BASE)-pgsql /bin/sh -c "cd /manager/susemanager-utils/susemanager-sls/; make -f Makefile.python __pylint"

# Open an interactive shell in the test container with the sources mounted
# under /manager -- useful for debugging the other docker_* targets.
docker_shell ::
	docker run -t -i --rm -e $(DOCKER_RUN_EXPORT) $(DOCKER_VOLUMES) $(DOCKER_REGISTRY)/$(DOCKER_CONTAINER_BASE)-pgsql /bin/bash

# Run the __pytest target inside the SUSE Manager test container.
docker_pytest ::
	docker run --rm -e $(DOCKER_RUN_EXPORT) $(DOCKER_VOLUMES) $(DOCKER_REGISTRY)/$(DOCKER_CONTAINER_BASE)-pgsql /bin/sh -c "cd /manager/susemanager-utils/susemanager-sls; make -f Makefile.python __pytest"
 07070100000003000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000002100000000susemanager-sls/formula_metadata  07070100000004000081B400000000000000000000000163F87E30000001A1000000000000000000000000000000000000002B00000000susemanager-sls/formula_metadata/README.md    All metadata for your custom Salt Formulas should be put here. (/srv/formula_metadata/<your-formula-name>/)
The state files need to be in a Salt file root and belong in /srv/salt.

To learn more about Salt Formulas and how to write them visit: https://docs.saltstack.com/en/latest/topics/development/conventions/formulas.html
To use your formulas effectively with SUSE Manager they additionally need a form.yml file.
   07070100000005000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001900000000susemanager-sls/formulas  07070100000006000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000002200000000susemanager-sls/formulas/metadata 07070100000007000081B400000000000000000000000163F87E30000001A4000000000000000000000000000000000000002C00000000susemanager-sls/formulas/metadata/README.md   The metadata of Salt Formulas that get installed per RPM belongs in this directory.

For more information visit:
https://github.com/SUSE/spacewalk/wiki/Using-Salt-formulas-with-SUSE-Manager
https://github.com/SUSE/spacewalk/wiki/Writing-Salt-Formulas-for-SUSE-Manager
https://github.com/SUSE/spacewalk/wiki/Salt-Formula-RPMs-for-SUSE-Manager
https://github.com/SUSE/spacewalk/wiki/How-Salt-formulas-in-SUSE-Manager-work
07070100000008000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000002000000000susemanager-sls/formulas/states   07070100000009000081B400000000000000000000000163F87E3000000022000000000000000000000000000000000000002D00000000susemanager-sls/formulas/states/formulas.sls  include: {{ pillar["formulas"] }}
  0707010000000A000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001800000000susemanager-sls/modules   0707010000000B000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000002000000000susemanager-sls/modules/engines   0707010000000C000081B400000000000000000000000163F87E3000002030000000000000000000000000000000000000002E00000000susemanager-sls/modules/engines/mgr_events.py # -*- coding: utf-8 -*-
'''
mgr_events.py is a SaltStack engine that writes selected events to SUSE
Manager's PostgreSQL database. Additionally, it sends notifications via the
LISTEN/NOTIFY mechanism to alert SUSE Manager of newly available events.

mgr_events.py tries to keep the I/O low in high load scenarios. Therefore
events are INSERTed once they come in, but not necessarily COMMITted
immediately.

The algorithm is an implementation of token bucket:
 - a COMMIT costs one token
 - initially, commit_burst tokens are available
 - every commit_interval seconds, one new token is generated
   (up to commit_burst)
 - when an event arrives and there are tokens available it is COMMITted
   immediately
 - when an event arrives but no tokens are available, the event is INSERTed but
   not COMMITted yet. COMMIT will happen as soon as a token is available

.. versionadded:: 2018.3.0

:depends: psycopg2

Minimal configuration example

.. code:: yaml

    engines:
      - mgr_events:
          postgres_db:
              dbname: susemanager
              user: spacewalk
              password: spacewalk
              host: localhost
              notify_channel: suseSaltEvent

Full configuration example

.. code:: yaml

    engines:
      - mgr_events:
          commit_interval: 1
          commit_burst: 100
          postgres_db:
              dbname: susemanager
              user: spacewalk
              password: spacewalk
              host: localhost
              port: 5432
              notify_channel: suseSaltEvent

Most of the values have a sane default, but the PostgreSQL connection
settings (dbname, user, password) must always be provided. Within
`postgres_db`, `notify_channel` and `host` are optional; `host` defaults
to 'localhost'.
'''

# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
import fnmatch
import hashlib

try:
    import psycopg2
    HAS_PSYCOPG2 = True
except ImportError:
    HAS_PSYCOPG2 = False

# Import salt libs
import salt.version
import salt.ext.tornado
import salt.utils.event
import json

log = logging.getLogger(__name__)

DEFAULT_COMMIT_INTERVAL = 1
DEFAULT_COMMIT_BURST = 100

def __virtual__():
    '''
    Only load this engine when the psycopg2 dependency is importable.
    '''
    return HAS_PSYCOPG2


class Responder:
    '''
    Consumes events from the Salt master event bus and INSERTs the relevant
    ones into the suseSaltEvent table, batching COMMITs with the token
    bucket algorithm described in the module docstring. Each COMMIT also
    sends a NOTIFY with the per-queue event counts so SUSE Manager picks
    up the new events.
    '''
    def __init__(self, event_bus, config):
        self.config = config
        self.config.setdefault('commit_interval', DEFAULT_COMMIT_INTERVAL)
        self.config.setdefault('commit_burst', DEFAULT_COMMIT_BURST)
        self.config.setdefault('postgres_db', {})
        self.config['postgres_db'].setdefault('host', 'localhost')
        self.config['postgres_db'].setdefault('notify_channel', 'suseSaltEvent')
        # One counter per queue: queue 0 plus one queue per worker thread.
        # NOTE(review): assumes config['events']['thread_pool_size'] is always
        # supplied by the master configuration -- it has no default here.
        self.counters = [0 for i in range(config['events']['thread_pool_size'] + 1)]
        # The token bucket starts full.
        self.tokens = config['commit_burst']
        self.event_bus = event_bus
        self._connect_to_database()
        # Schedule the periodic token refill on the event loop.
        self.event_bus.io_loop.call_later(config['commit_interval'], self.add_token)

    def _connect_to_database(self):
        '''
        Connect to PostgreSQL, retrying every 5 seconds until the connection
        succeeds, and create the cursor used for all statements.
        '''
        db_config = self.config.get('postgres_db')
        # 'port' is optional in the configuration; build the DSN accordingly.
        if 'port' in db_config:
            conn_string = "dbname='{dbname}' user='{user}' host='{host}' port='{port}' password='{password}'".format(**db_config)
        else:
            conn_string = "dbname='{dbname}' user='{user}' host='{host}' password='{password}'".format(**db_config)
        log.debug("%s: connecting to database", __name__)
        while True:
            try:
                self.connection = psycopg2.connect(conn_string)
                break
            except psycopg2.OperationalError as err:
                log.error("%s: %s", __name__, err)
                log.error("%s: Retrying in 5 seconds.", __name__)
                time.sleep(5)
        self.cursor = self.connection.cursor()

    def _insert(self, tag, data):
        '''
        INSERT the event into suseSaltEvent if its tag matches one of the
        relevant patterns; salt mine updates and batch-mode presence pings
        are filtered out. The COMMIT may be deferred (token bucket).
        '''
        self.db_keepalive()
        if any([
            fnmatch.fnmatch(tag, "salt/minion/*/start"),
            fnmatch.fnmatch(tag, "salt/job/*/ret/*"),
            fnmatch.fnmatch(tag, "salt/beacon/*"),
            fnmatch.fnmatch(tag, "salt/engines/libvirt_events/*/domain/lifecycle"),
            fnmatch.fnmatch(tag, "salt/engines/libvirt_events/*/pool/lifecycle"),
            fnmatch.fnmatch(tag, "salt/engines/libvirt_events/*/network/lifecycle"),
            fnmatch.fnmatch(tag, "salt/engines/libvirt_events/*/pool/refresh"),
            fnmatch.fnmatch(tag, "salt/batch/*/start"),
            fnmatch.fnmatch(tag, "suse/manager/image_deployed"),
            fnmatch.fnmatch(tag, "suse/manager/image_synced"),
            fnmatch.fnmatch(tag, "suse/systemid/generate")
        ]) and not self._is_salt_mine_event(tag, data) and not self._is_presence_ping(tag, data):
            # Events carrying a minion id are sharded over the worker queues
            # by a hash of the id; everything else goes to queue 0.
            queue = 0
            if 'id' in data:
                hash_sum = hashlib.md5(data.get("id").encode(self.connection.encoding)).hexdigest()[0:8]
                queue = int(hash_sum, 16) % self.config['events']['thread_pool_size'] + 1
            log.debug("%s: Adding event to queue %d -> %s", __name__, queue, tag)
            try:
                self.cursor.execute(
                    'INSERT INTO suseSaltEvent (minion_id, data, queue) VALUES (%s, %s, %s);',
                    (data.get("id"), json.dumps({'tag': tag, 'data': data}), queue)
                )
                self.counters[queue] += 1
                self.attempt_commit()
            except Exception as err:
                log.error("%s: %s", __name__, err)
            finally:
                log.debug("%s: %s", __name__, self.cursor.query)
        else:
            log.debug("%s: Discarding event -> %s", __name__, tag)

    def trace_log(self):
        # Trace-level snapshot of the queue counters and remaining tokens.
        log.trace("%s: queues sizes -> %s", __name__, self.counters)
        log.trace("%s: tokens -> %s", __name__, self.tokens)

    def _is_salt_mine_event(self, tag, data):
        # True for job returns produced by mine.update runs.
        return fnmatch.fnmatch(tag, "salt/job/*/ret/*") and self._is_salt_mine_update(data)

    def _is_salt_mine_update(self, data):
        return data.get("fun") == "mine.update"

    def _is_presence_ping(self, tag, data):
        # True for test.ping job returns issued in batch mode (presence checks).
        return fnmatch.fnmatch(tag, "salt/job/*/ret/*") and self._is_test_ping(data) and self._is_batch_mode(data)

    def _is_test_ping(self, data):
        return data.get("fun") == "test.ping"

    def _is_batch_mode(self, data):
        return data.get("metadata", {}).get("batch-mode")

    @salt.ext.tornado.gen.coroutine
    def add_event_to_queue(self, raw):
        '''
        Event bus callback: unpack the raw event and insert it.
        '''
        # FIXME: Drop once we only use Salt >= 3004
        if salt.version.SaltStackVersion(*salt.version.__version_info__).major < 3004:
            tag, data = self.event_bus.unpack(raw, self.event_bus.serial)
        else:
            tag, data = self.event_bus.unpack(raw)
        self._insert(tag, data)

    def db_keepalive(self):
        # Reconnect (with retries) if the connection was closed underneath us.
        if self.connection.closed:
            log.error("%s: Diconnected from database. Trying to reconnect...", __name__)
            self._connect_to_database()

    @salt.ext.tornado.gen.coroutine
    def add_token(self):
        '''
        Periodic callback: add one token (capped at commit_burst), try to
        commit pending events, and reschedule itself.
        '''
        self.tokens = min(self.tokens + 1, self.config['commit_burst'])
        self.attempt_commit()
        self.trace_log()
        self.event_bus.io_loop.call_later(self.config['commit_interval'], self.add_token)

    def attempt_commit(self):
        """
        Committing to the database.

        Only commits when a token is available and there are uncommitted
        events; sends a NOTIFY with the per-queue counts, resets the
        counters and spends one token.
        """
        self.db_keepalive()
        if self.tokens > 0 and sum(self.counters) > 0:
            log.debug("%s: commit", __name__)
            self.cursor.execute(
                "NOTIFY {}, '{}';".format(
                    self.config['postgres_db']['notify_channel'],
                    ",".join([str(counter) for counter in self.counters]))
            )
            self.connection.commit()
            self.counters = [0 for i in range(0, self.config['events']['thread_pool_size'] + 1)]
            self.tokens -=1

def start(**config):
    '''
    Engine entry point: listen on the Salt master event bus and forward
    selected events to the PostgreSQL database through a Responder.
    '''
    loop = salt.ext.tornado.ioloop.IOLoop(make_current=False)
    loop.make_current()
    bus = salt.utils.event.get_master_event(
        __opts__,
        __opts__['sock_dir'],
        listen=True,
        io_loop=loop,
    )
    handler = Responder(bus, config)
    bus.set_event_handler(handler.add_event_to_queue)
    loop.start()
0707010000000D000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001F00000000susemanager-sls/modules/pillar    0707010000000E000081B400000000000000000000000163F87E3000000147000000000000000000000000000000000000002900000000susemanager-sls/modules/pillar/README.md  Overview
========

1. In the "/etc/salt/master" add the following:

   extension_modules: /path/to/the/extension_pillar_modules

2. Copy *.py from this directory to the `extension_modules` directory.

3. Then, in the "/etc/salt/master" add the following:

   ext_pillar:
     - suma_minion: /another/path/with/the/pillar/files
 0707010000000F000081B400000000000000000000000163F87E3000003E65000000000000000000000000000000000000002E00000000susemanager-sls/modules/pillar/suma_minion.py # -*- coding: utf-8 -*-
'''
Retrieve SUSE Manager pillar data for a minion_id.
- Adds generated and static SUSE Manager pillar data.
- Adds formula pillar data.

.. code-block:: yaml

    ext_pillar:
      - suma_minion: True

'''

# Import python libs
from __future__ import absolute_import
from enum import Enum
import os
import logging
import yaml
import json
import sys
import re
import salt.utils.dictupdate
import salt.utils.stringutils

# SUSE Manager static pillar paths:
MANAGER_STATIC_PILLAR_DATA_PATH = '/usr/share/susemanager/pillar_data'
MANAGER_PILLAR_DATA_PATH = '/srv/susemanager/pillar_data'

# SUSE Manager formulas paths:
MANAGER_FORMULAS_METADATA_MANAGER_PATH = '/usr/share/susemanager/formulas/metadata'
MANAGER_FORMULAS_METADATA_STANDALONE_PATH = '/usr/share/salt-formulas/metadata'
CUSTOM_FORMULAS_METADATA_PATH = '/srv/formula_metadata'
FORMULAS_DATA_PATH = '/srv/susemanager/formula_data'
FORMULA_ORDER_FILE = FORMULAS_DATA_PATH + '/formula_order.json'

# OS images path:
IMAGES_DATA_PATH = os.path.join(MANAGER_PILLAR_DATA_PATH, 'images')

# SUSE Manager static pillar data.
MANAGER_STATIC_PILLAR = [
    'gpgkeys'
]

MANAGER_GLOBAL_PILLAR = [
    'mgr_conf'
]

MINION_PILLAR_FILES_PREFIX = "pillar_{minion_id}"
MINION_PILLAR_FILES_SUFFIXES = [".yml", "_group_memberships.yml", "_virtualization.yml", "_custom_info.yml"]

CONFIG_FILE = '/etc/rhn/rhn.conf'

formulas_metadata_cache = dict()

# Formula group subtypes
class EditGroupSubtype(Enum):
    '''
    Shapes an "edit-group" form element can take, as derived from its
    "$prototype" by get_edit_group_subtype().
    '''
    PRIMITIVE_LIST = "PRIMITIVE_LIST"
    PRIMITIVE_DICTIONARY = "PRIMITIVE_DICTIONARY"
    LIST_OF_DICTIONARIES = "LIST_OF_DICTIONARIES"
    DICTIONARY_OF_DICTIONARIES = "DICTIONARY_OF_DICTIONARIES"

# Set up logging
log = logging.getLogger(__name__)


def __virtual__():
    '''
    Ensure the pillar module name.

    Always loads: this ext_pillar has no hard dependencies.
    '''
    return True

def ext_pillar(minion_id, *args):
    '''
    Find SUMA-related pillars for the registered minions and return the data.

    :param minion_id: the minion to assemble pillar data for
    :param args: extra ext_pillar arguments (unused)
    :return: dict with the merged pillar data
    '''
    log.debug('Getting pillar data for the minion "{0}"'.format(minion_id))
    ret = {}

    # Including SUSE Manager static pillar data
    for static_pillar in MANAGER_STATIC_PILLAR:
        static_pillar_filename = os.path.join(MANAGER_STATIC_PILLAR_DATA_PATH, static_pillar)
        try:
            # context manager ensures the handle is closed even on parse errors
            with open('{0}.yml'.format(static_pillar_filename)) as stream:
                ret.update(yaml.load(stream.read(), Loader=yaml.FullLoader))
        except Exception as exc:
            log.error('Error accessing "{0}": {1}'.format(static_pillar_filename, exc))

    # Including SUSE Manager global pillar data
    for global_pillar in MANAGER_GLOBAL_PILLAR:
        global_pillar_filename = os.path.join(MANAGER_PILLAR_DATA_PATH, global_pillar)
        try:
            with open('{0}.yml'.format(global_pillar_filename)) as stream:
                ret.update(yaml.load(stream.read(), Loader=yaml.FullLoader))
        except Exception as exc:
            log.error('Error accessing "{0}": {1}'.format(global_pillar_filename, exc))

    # Including generated pillar data for this minion
    minion_pillar_filename_prefix = MINION_PILLAR_FILES_PREFIX.format(minion_id=minion_id)
    for suffix in MINION_PILLAR_FILES_SUFFIXES:
        data_filename = os.path.join(MANAGER_PILLAR_DATA_PATH, minion_pillar_filename_prefix + suffix)
        if os.path.exists(data_filename):
            try:
                with open(data_filename) as stream:
                    ret = salt.utils.dictupdate.merge(
                            ret,
                            yaml.load(stream.read(), Loader=yaml.FullLoader),
                            strategy='recurse')
            except Exception as error:
                log.error('Error accessing "{pillar_file}": {message}'.format(pillar_file=data_filename, message=str(error)))

    # Including formulas into pillar data
    try:
        ret.update(formula_pillars(minion_id, ret.get("group_ids", [])))
    except Exception as error:
        log.error('Error accessing formula pillar data: {message}'.format(message=str(error)))

    # Including images pillar
    try:
        ret.update(image_pillars(minion_id, ret.get("group_ids", []), ret.get("org_id", 1)))
    except Exception as error:
        log.error('Error accessing image pillar data: {}'.format(str(error)))

    return ret


def load_formulas_from_file(formula_filename):
    '''
    Read a JSON formulas mapping from FORMULAS_DATA_PATH.

    Returns an empty dict when the file is missing or unreadable.
    '''
    path = os.path.join(FORMULAS_DATA_PATH, formula_filename)
    if not os.path.exists(path):
        return {}
    try:
        with open(path) as handle:
            return json.load(handle)
    except Exception as error:
        log.error('Error loading formulas from file: {message}'.format(message=str(error)))
        return {}


def formula_pillars(minion_id, group_ids):
    '''
    Find formula pillars for the minion, merge them and return the data.

    :param minion_id: the minion to collect formula pillar data for
    :param group_ids: ids of the system groups the minion belongs to
    :return: dict with merged formula data plus the "formulas" order list
    '''
    pillar = {}
    out_formulas = []

    # Loading group formulas
    data = load_formulas_from_file("group_formulas.json")
    for group in group_ids:
        for formula in data.get(str(group), []):
            formula_utf8 = salt.utils.stringutils.to_str(formula)
            formula_metadata = load_formula_metadata(formula)
            if formula_metadata.get("type", "") != "cluster-formula":
                # a minion can be in multiple cluster groups, each group with its own cluster-formulas
                # in such a case we want to merge all values from cluster-formulas
                # the values of the formula will be under different keys, mgr_clusters:cluster1:.., mgr_clusters:cluster2:...
                if formula_utf8 in out_formulas:
                    continue # already processed
            out_formulas.append(formula_utf8)
            pillar = salt.utils.dictupdate.merge(pillar,
                     load_formula_pillar(minion_id, group, formula, formula_metadata),
                     strategy='recurse')

    # Loading minion formulas
    data = load_formulas_from_file("minion_formulas.json")
    for formula in data.get(str(minion_id), []):
        formula_utf8 = salt.utils.stringutils.to_str(formula)
        if formula_utf8 in out_formulas:
            continue # already processed
        out_formulas.append(formula_utf8)
        pillar = salt.utils.dictupdate.merge(pillar,
                 load_formula_pillar(minion_id, None, formula),
                 strategy='recurse')

    # Loading the formula order
    # The order file, when present, restricts and sorts the "formulas" list
    # according to the configured global ordering.
    if os.path.exists(FORMULA_ORDER_FILE):
        with open(FORMULA_ORDER_FILE) as ofile:
            order = json.load(ofile)
            pillar["formulas"] = list(filter(lambda i: i in out_formulas, order))
    else:
        pillar["formulas"] = out_formulas

    return pillar


def load_formula_pillar(minion_id, group_id, formula_name, formula_metadata=None):
    '''
    Load the data from a specific formula for a minion in a specific group,
    merge and return it.

    :param minion_id: the minion the data is loaded for
    :param group_id: the system group id, or None for minion-level formulas
    :param formula_name: name of the formula to load
    :param formula_metadata: optional pre-loaded metadata dict for the formula
    :return: the merged formula data, or {} on any error
    '''
    # Look for form.yml in standalone, manager and custom locations, in order.
    layout_filename = os.path.join(MANAGER_FORMULAS_METADATA_STANDALONE_PATH, formula_name, "form.yml")
    if not os.path.isfile(layout_filename):
        layout_filename = os.path.join(MANAGER_FORMULAS_METADATA_MANAGER_PATH, formula_name, "form.yml")
        if not os.path.isfile(layout_filename):
            layout_filename = os.path.join(CUSTOM_FORMULAS_METADATA_PATH, formula_name, "form.yml")
            if not os.path.isfile(layout_filename):
                log.error('Error loading data for formula "{formula}": No form.yml found'.format(formula=formula_name))
                return {}

    group_filename = os.path.join(FORMULAS_DATA_PATH, "group_pillar", "{id}_{name}.json".format(id=group_id, name=formula_name)) if group_id is not None else None
    system_filename = os.path.join(FORMULAS_DATA_PATH, "pillar", "{id}_{name}.json".format(id=minion_id, name=formula_name))

    try:
        # Context managers close the file handles even when parsing fails
        # (the previous open(...).read() pattern leaked them).
        with open(layout_filename) as layout_file:
            layout = yaml.load(layout_file.read(), Loader=yaml.FullLoader)
        if group_filename is not None and os.path.isfile(group_filename):
            with open(group_filename) as group_file:
                group_data = json.load(group_file)
        else:
            group_data = {}
        if os.path.isfile(system_filename):
            with open(system_filename) as system_file:
                system_data = json.load(system_file)
        else:
            system_data = {}
    except Exception as error:
        log.error('Error loading data for formula "{formula}": {message}'.format(formula=formula_name, message=str(error)))
        return {}

    # if group_data starts with mgr_clusters then merge and adjust without the mgr_clusters:<cluster>:settings prefix
    cluster_name = None
    cluster_pillar_key = None
    if formula_metadata and formula_metadata.get("type", "") == "cluster-formula":
        if "cluster_pillar_key" not in formula_metadata:
            log.error("No 'cluster_pillar_key' in metadata of formula {}".format(formula_name))
        else:
            cluster_pillar_key = formula_metadata["cluster_pillar_key"]
            group_data, cluster_name = _pillar_value_by_path(group_data, "mgr_clusters:*:{}".format(cluster_pillar_key))

    merged_data = merge_formula_data(layout, group_data, system_data)
    merged_data = adjust_empty_values(layout, merged_data)

    # put back data under cluster pillar namespace
    if cluster_name:
        merged_data = {"mgr_clusters": {cluster_name: {cluster_pillar_key: merged_data}}}

    return merged_data


def merge_formula_data(layout, group_data, system_data, scope="system"):
    '''
    Merge the group and system formula data, respecting the scope of a value.

    :param layout: the form.yml layout describing the elements
    :param group_data: values saved at system-group level
    :param system_data: values saved at system (minion) level
    :param scope: scope inherited from the enclosing group element
    :return: dict with one merged value per form element
    '''
    merged = {}

    for key, spec in layout.items():
        # "$..." entries are layout metadata, not form elements.
        if key.startswith("$"):
            continue
        if not isinstance(spec, dict):
            continue

        effective_scope = spec.get("$scope", scope)
        fallback = spec.get("$default", spec.get("$placeholder", ""))

        if spec.get("$type", "text") in ("group", "hidden-group", "namespace"):
            # Containers are merged recursively; the scope is inherited.
            merged[key] = merge_formula_data(
                spec,
                group_data.get(key, {}),
                system_data.get(key, {}),
                effective_scope,
            )
        elif effective_scope == "system":
            # edit-group is handled as a primitive element here: either the
            # system value or the group value wins, no deep merging.
            merged[key] = system_data.get(key, group_data.get(key, fallback))
        elif effective_scope == "group":
            merged[key] = group_data.get(key, fallback)
        elif effective_scope == "readonly":
            merged[key] = fallback
        else:
            # Unknown scope: keep the element with an empty value.
            merged[key] = None

    return merged


def adjust_empty_values(layout, data):
    '''
    Adjust empty values in formula data.

    Recurses into (hidden-)groups and namespaces, rebuilds edit-group
    collections entry by entry, applies "$ifEmpty" fallbacks and drops
    empty optional elements.
    '''
    adjusted = {}

    for name in layout:
        # "$..." entries are layout metadata, not form elements.
        if name.startswith("$"):
            continue

        spec = layout[name]
        if not isinstance(spec, dict):
            continue

        kind = spec.get("$type", "text")
        value = data.get(name, "")

        if kind in ["group", "hidden-group", "namespace"]:
            value = adjust_empty_values(spec, data.get(name, {}))
        elif kind == "edit-group":
            prototype = spec.get("$prototype")
            subtype = get_edit_group_subtype(spec)
            raw = data.get(name)
            if subtype is EditGroupSubtype.DICTIONARY_OF_DICTIONARIES:
                value = {}
                if isinstance(raw, dict):
                    for key, entry in list(raw.items()):
                        value[key] = adjust_empty_values(prototype, entry)
            elif subtype is EditGroupSubtype.LIST_OF_DICTIONARIES:
                value = []
                if isinstance(raw, list):
                    for entry in raw:
                        value.append(adjust_empty_values(prototype, entry))

        if not value and '$ifEmpty' in spec:
            value = spec.get("$ifEmpty")

        # Empty optional elements are dropped entirely.
        if value or not spec.get("$optional"):
            adjusted[name] = value

    return adjusted

def get_edit_group_subtype(element):
    '''
    Classify an "edit-group" element by its "$prototype": whether entries
    are keyed ("$key" present) and whether each entry is itself a group.

    Returns an EditGroupSubtype member, or None without a usable prototype.
    '''
    prototype = element.get("$prototype") if element is not None else None
    if not prototype:
        return None

    keyed = prototype.get("$key") is not None
    grouped = prototype.get("$type", "group") == "group"

    if grouped:
        if keyed:
            return EditGroupSubtype.DICTIONARY_OF_DICTIONARIES
        return EditGroupSubtype.LIST_OF_DICTIONARIES
    if keyed:
        return EditGroupSubtype.PRIMITIVE_DICTIONARY
    return EditGroupSubtype.PRIMITIVE_LIST

def image_pillars(minion_id, group_ids, org_id):
    '''
    Load image pillars

    Image pillars are automatically created after image build and are available to all minions

    :param minion_id: the minion the pillars are loaded for (not used for filtering)
    :param group_ids: ids of the system groups the minion belongs to
    :param org_id: the organization id of the minion
    :return: merged pillar data from the matching image pillar files
    '''
    ret = {}
    group_dirs = []
    org_dirs = []

    for pillar in os.listdir(IMAGES_DATA_PATH):
        pillar_path = os.path.join(IMAGES_DATA_PATH, pillar)

        # read also pillars from top dir, for backward compatibility
        if os.path.isfile(pillar_path) and pillar.endswith('.sls'):
            try:
                with open(pillar_path) as p:
                    ret = salt.utils.dictupdate.merge(ret, yaml.load(p.read(), Loader=yaml.FullLoader), strategy='recurse')
            except Exception as error:
                # 'pillar' is a plain string; the previous pillar.path() call
                # raised AttributeError here and masked the original error.
                log.error('Error loading data for image "{image}": {message}'.format(image=pillar_path, message=str(error)))

        elif os.path.isdir(pillar_path):
            # 'orgN' directories apply per organization, 'groupN' per system group.
            if pillar.startswith('org') and int(pillar[3:]) == org_id:
                org_dirs.append(pillar_path)
            elif pillar.startswith('group') and int(pillar[5:]) in group_ids:
                group_dirs.append(pillar_path)

    # Group pillars are merged after (and thus can override) org pillars.
    for pillar_dir in org_dirs + group_dirs:
        for pillar in os.listdir(pillar_dir):
            pillar_path = os.path.join(pillar_dir, pillar)
            if os.path.isfile(pillar_path) and pillar.endswith('.sls'):
                try:
                    with open(pillar_path) as p:
                        ret = salt.utils.dictupdate.merge(ret, yaml.load(p.read(), Loader=yaml.FullLoader), strategy='recurse')
                except Exception as error:
                    log.error('Error loading data for image "{image}": {message}'.format(image=pillar_path, message=str(error)))

    return ret

def load_formula_metadata(formula_name):
    '''
    Load metadata.yml for the given formula and cache the result.

    Standalone, manager and custom metadata locations are searched in that
    order; the first existing file wins.

    :param formula_name: name of the formula
    :return: the parsed metadata dict, or {} when missing or unreadable
    '''
    if formula_name in formulas_metadata_cache:
        return formulas_metadata_cache[formula_name]

    metadata_filename = None
    metadata_paths_ordered = [
        os.path.join(MANAGER_FORMULAS_METADATA_STANDALONE_PATH, formula_name, "metadata.yml"),
        os.path.join(MANAGER_FORMULAS_METADATA_MANAGER_PATH, formula_name, "metadata.yml"),
        os.path.join(CUSTOM_FORMULAS_METADATA_PATH, formula_name, "metadata.yml")
    ]

    # Take the first metadata file that exists
    for mpath in metadata_paths_ordered:
        if os.path.isfile(mpath):
            metadata_filename = mpath
            break

    if not metadata_filename:
        log.error('Error loading metadata for formula "{formula}": No metadata.yml found'.format(formula=formula_name))
        return {}
    try:
        # Close the file handle deterministically instead of leaking it.
        with open(metadata_filename) as metadata_file:
            metadata = yaml.load(metadata_file.read(), Loader=yaml.FullLoader)
    except Exception as error:
        log.error('Error loading data for formula "{formula}": {message}'.format(formula=formula_name, message=str(error)))
        return {}

    formulas_metadata_cache[formula_name] = metadata
    return metadata

def _pillar_value_by_path(data, path):
    result = data
    first_key = None
    for token in path.split(":"):
        if token == "*":
            first_key = next(iter(result))
            result = result[first_key] if first_key else None
        elif token in result:
            result = result[token]
        else:
            break
    return result, first_key
   07070100000010000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001F00000000susemanager-sls/modules/roster    07070100000011000081B400000000000000000000000163F87E3000002F43000000000000000000000000000000000000002800000000susemanager-sls/modules/roster/uyuni.py   """
Read in the roster from Uyuni DB
"""
import hashlib
import io
import logging

# Import Salt libs
import salt.cache
import salt.config
import salt.loader

try:
    import psycopg2
    from psycopg2.extras import NamedTupleCursor

    HAS_PSYCOPG2 = True
except ImportError:
    HAS_PSYCOPG2 = False

from yaml import dump


__virtualname__ = "uyuni"

log = logging.getLogger(__name__)


COBBLER_HOST = "localhost"
PROXY_SSH_PUSH_USER = "mgrsshtunnel"
PROXY_SSH_PUSH_KEY = (
    "/var/lib/spacewalk/" + PROXY_SSH_PUSH_USER + "/.ssh/id_susemanager_ssh_push"
)
SALT_SSH_CONNECT_TIMEOUT = 180
SSH_KEY_DIR = "/srv/susemanager/salt/salt_ssh"
SSH_KEY_PATH = SSH_KEY_DIR + "/mgr_ssh_id"
SSH_PRE_FLIGHT_SCRIPT = None
SSH_PUSH_PORT = 22
SSH_PUSH_PORT_HTTPS = 1233
SSH_PUSH_SUDO_USER = None
SSH_USE_SALT_THIN = True
SSL_PORT = 443


def __virtual__():
    """
    Load the roster only when psycopg2 is importable and the master
    configuration carries both "postgres" and "uyuni_roster" sections.
    """
    if not HAS_PSYCOPG2:
        return (False, "psycopg2 is not available")

    missing_config = any(
        __opts__.get(section) is None for section in ("postgres", "uyuni_roster")
    )
    if missing_config:
        return (False, "Uyuni is not installed or configured")

    return __virtualname__


class UyuniRoster:
    """
    The class to instantiate Uyuni connection and data gathering.
    It's used to keep the DB connection, cache object and others in one instance
    to prevent race conditions on loading the module with LazyLoader.
    """

    def __init__(self, db_config, uyuni_roster_config):
        """
        :param db_config: the ``postgres`` section of the master config
                          (keys ``db``, ``user``, ``host``, ``pass`` and
                          optionally ``port``)
        :param uyuni_roster_config: the ``uyuni_roster`` section of the
                                    master config
        """
        # Fingerprint of the roster config; stored with the cached minions so
        # the cache can be flushed whenever the configuration changes.
        self.config_hash = hashlib.sha256(
            str(uyuni_roster_config).encode(errors="backslashreplace")
        ).hexdigest()
        self.ssh_pre_flight_script = uyuni_roster_config.get("ssh_pre_flight_script")
        self.ssh_push_port_https = uyuni_roster_config.get(
            "ssh_push_port_https", SSH_PUSH_PORT_HTTPS
        )
        self.ssh_push_sudo_user = uyuni_roster_config.get("ssh_push_sudo_user", "root")
        self.ssh_use_salt_thin = uyuni_roster_config.get(
            "ssh_use_salt_thin", SSH_USE_SALT_THIN
        )
        self.ssh_connect_timeout = uyuni_roster_config.get(
            "ssh_connect_timeout", SALT_SSH_CONNECT_TIMEOUT
        )
        self.cobbler_host = uyuni_roster_config.get("host", COBBLER_HOST)

        if "port" in db_config:
            self.db_connect_str = "dbname='{db}' user='{user}' host='{host}' port='{port}' password='{pass}'".format(
                **db_config
            )
        else:
            self.db_connect_str = (
                "dbname='{db}' user='{user}' host='{host}' password='{pass}'".format(
                    **db_config
                )
            )

        # NOTE: log.trace / logging.TRACE are added by Salt's logging setup.
        log.trace("db_connect string: %s", self.db_connect_str)
        log.debug("ssh_pre_flight_script: %s", self.ssh_pre_flight_script)
        log.debug("ssh_push_port_https: %d", self.ssh_push_port_https)
        log.debug("ssh_push_sudo_user: %s", self.ssh_push_sudo_user)
        log.debug("ssh_use_salt_thin: %s", self.ssh_use_salt_thin)
        log.debug("salt_ssh_connect_timeout: %d", self.ssh_connect_timeout)
        log.debug("cobbler.host: %s", self.cobbler_host)

        self.cache = salt.cache.Cache(__opts__)
        cache_data = self.cache.fetch("roster/uyuni", "minions")
        if "minions" in cache_data and self.config_hash != cache_data.get(
            "config_hash"
        ):
            log.debug("Flushing the cache as the config has been changed")
            self.cache.flush("roster/uyuni")

        self._init_db()

    def _init_db(self):
        """
        (Re-)establish the connection to the Uyuni DB.
        A failure is only logged; callers retry through _execute_query().
        """
        log.trace("_init_db")

        try:
            self.db_connection = psycopg2.connect(
                self.db_connect_str, cursor_factory=NamedTupleCursor
            )
            log.trace("_init_db: done")
        except psycopg2.OperationalError as err:
            # NOTE(review): if the very first connect fails, db_connection
            # stays unset and _execute_query would raise AttributeError
            # instead of retrying — confirm whether that can happen in prod.
            log.warning(
                "Unable to connect to the Uyuni DB: \n%sWill try to reconnect later.",
                err,
            )

    def _execute_query(self, *args, **kwargs):
        """
        Run a query, reconnecting once if the connection dropped.

        :return: the cursor with results, or None when the DB is unreachable
        """
        log.trace("_execute_query")

        try:
            cur = self.db_connection.cursor()
            cur.execute(*args, **kwargs)
            log.trace("_execute_query: ret %s", cur)
            return cur
        except psycopg2.OperationalError as err:
            log.warning("Error during SQL prepare: %s", err)
            log.warning("Trying to reinit DB connection...")
            self._init_db()
            try:
                cur = self.db_connection.cursor()
                cur.execute(*args, **kwargs)
                return cur
            except psycopg2.OperationalError:
                log.warning("Unable to re-establish connection to the Uyuni DB")
                log.trace("_execute_query: ret None")
                return None

    def _get_ssh_options(
        self,
        minion_id=None,
        proxies=None,
        tunnel=False,
        user=None,
        ssh_push_port=SSH_PUSH_PORT,
    ):
        """
        Build the ssh ProxyCommand chain that hops through the given proxies
        to reach the minion.

        :param minion_id: hostname of the target minion
        :param proxies: ordered list of proxy hostnames (closest to the
                        server first)
        :param tunnel: True for the ssh-push-tunnel contact method
        :param user: remote user for the tunnel command on the minion
        :param ssh_push_port: ssh port on the minion
        :return: list with a single "ProxyCommand='...'" ssh option
        """
        proxies = proxies or []
        proxy_command = []
        last = len(proxies) - 1
        for i, proxy in enumerate(proxies):
            proxy_command.append(
                "/usr/bin/ssh -i {ssh_key_path} -o StrictHostKeyChecking=no "
                "-o User={ssh_push_user} {in_out_forward} {proxy_host}".format(
                    # First hop uses the server key, later hops the proxy key.
                    ssh_key_path=SSH_KEY_PATH if i == 0 else PROXY_SSH_PUSH_KEY,
                    ssh_push_user=PROXY_SSH_PUSH_USER,
                    # Only the last hop forwards stdio to the minion, and only
                    # when not tunneling (the tunnel command below does it).
                    in_out_forward="-W {host}:{port}".format(
                        host=minion_id, port=ssh_push_port
                    )
                    if not tunnel and i == last
                    else "",
                    proxy_host=proxy,
                )
            )
        if tunnel:
            proxy_command.append(
                "/usr/bin/ssh -i {pushKey} -o StrictHostKeyChecking=no "
                "-o User={user} -R {pushPort}:{proxy}:{sslPort} {minion} "
                "ssh -i {ownKey} -W {minion}:{sshPort} "
                "-o StrictHostKeyChecking=no -o User={user} {minion}".format(
                    pushKey=PROXY_SSH_PUSH_KEY,
                    user=user,
                    pushPort=self.ssh_push_port_https,
                    proxy=proxies[-1],
                    sslPort=SSL_PORT,
                    minion=minion_id,
                    ownKey="{}{}".format(
                        "/root" if user == "root" else "/home/{}".format(user),
                        "/.ssh/mgr_own_id",
                    ),
                    sshPort=ssh_push_port,
                )
            )

        return ["ProxyCommand='{}'".format(" ".join(proxy_command))]

    def _get_ssh_minion(
        self, minion_id=None, proxies=None, tunnel=False, ssh_push_port=SSH_PUSH_PORT
    ):
        """
        Build the salt-ssh roster entry for one minion.

        :param minion_id: hostname of the minion
        :param proxies: ordered list of proxy hostnames (may be empty)
        :param tunnel: True for the ssh-push-tunnel contact method
        :param ssh_push_port: ssh port on the minion
        :return: dict with the roster entry
        """
        # FIX: previously a shared mutable default argument (proxies=[]).
        proxies = proxies or []
        minion = {
            "host": minion_id,
            "user": self.ssh_push_sudo_user,
            "port": ssh_push_port,
            "timeout": self.ssh_connect_timeout,
        }
        if tunnel:
            # In tunnel mode the minion reaches the master through the
            # reverse tunnel, i.e. via its own hostname.
            minion.update({"minion_opts": {"master": minion_id}})
        if self.ssh_pre_flight_script:
            minion.update(
                {
                    "ssh_pre_flight": self.ssh_pre_flight_script,
                    "ssh_pre_flight_args": [
                        proxies[-1] if proxies else self.cobbler_host,
                        self.ssh_push_port_https if tunnel else SSL_PORT,
                        1 if self.ssh_use_salt_thin else 0,
                    ],
                }
            )
        if proxies:
            minion.update(
                {
                    "ssh_options": self._get_ssh_options(
                        minion_id=minion_id,
                        proxies=proxies,
                        tunnel=tunnel,
                        user=self.ssh_push_sudo_user,
                        ssh_push_port=ssh_push_port,
                    )
                }
            )
        elif tunnel:
            minion.update(
                {
                    "remote_port_forwards": "%d:%s:%d"
                    % (self.ssh_push_port_https, self.cobbler_host, SSL_PORT)
                }
            )

        return minion

    def targets(self):
        """
        Return the roster: a dict mapping minion ids to roster entries for
        all ssh-push / ssh-push-tunnel systems in the Uyuni DB.

        A cheap fingerprint query over the relevant tables decides whether
        the cached roster is still fresh; the cache is also the fallback
        when the DB is unreachable.
        """
        cache_data = self.cache.fetch("roster/uyuni", "minions")
        cache_fp = cache_data.get("fp", None)
        query = """
            SELECT ENCODE(SHA256(FORMAT('%s|%s|%s|%s|%s|%s',
                          EXTRACT(EPOCH FROM MAX(S.modified)),
                          COUNT(S.id),
                          EXTRACT(EPOCH FROM MAX(SP.modified)),
                          COUNT(SP.proxy_server_id),
                          EXTRACT(EPOCH FROM MAX(SMI.modified)),
                          COUNT(SMI.server_id)
                   )::bytea), 'hex') AS fp
                   FROM rhnServer AS S
                   INNER JOIN suseMinionInfo AS SMI ON
                         (SMI.server_id=S.id)
                   LEFT JOIN rhnServerPath AS SP ON
                        (SP.server_id=S.id)
                   WHERE S.contact_method_id IN (
                             SELECT SSCM.id
                             FROM suseServerContactMethod AS SSCM
                             WHERE SSCM.label IN ('ssh-push', 'ssh-push-tunnel')
                         )
        """
        h = self._execute_query(query)
        if h is not None:
            row = h.fetchone()
            if row and row.fp:
                log.trace("db cache fingerprint: %s", row.fp)
                new_fp = row.fp
                log.trace("cache check: old:%s new:%s", cache_fp, new_fp)
                if (
                    new_fp == cache_fp
                    and "minions" in cache_data
                    and cache_data["minions"]
                ):
                    log.debug("Returning the cached data")
                    return cache_data["minions"]
                log.debug("Invalidate cache")
                cache_fp = new_fp
        else:
            log.warning(
                "Unable to reconnect to the Uyuni DB. Returning the cached data instead."
            )
            # FIX: .get() instead of [] to avoid a KeyError when the DB is
            # down and the cache is still cold.
            return cache_data.get("minions", {})

        ret = {}

        query = """
            SELECT S.id AS server_id,
                   SMI.minion_id AS minion_id,
                   SMI.ssh_push_port AS ssh_push_port,
                   SSCM.label='ssh-push-tunnel' AS tunnel,
                   SP.hostname AS proxy_hostname
            FROM rhnServer AS S
            INNER JOIN suseServerContactMethod AS SSCM ON
                  (SSCM.id=S.contact_method_id)
            INNER JOIN suseMinionInfo AS SMI ON
                  (SMI.server_id=S.id)
            LEFT JOIN rhnServerPath AS SP ON
                 (SP.server_id=S.id)
            WHERE SSCM.label IN ('ssh-push', 'ssh-push-tunnel')
            ORDER BY S.id, SP.position DESC
        """

        h = self._execute_query(query)
        if h is None:
            # FIX: the connection can also die between the two queries;
            # fall back to the cached roster instead of crashing.
            log.warning(
                "Unable to reconnect to the Uyuni DB. Returning the cached data instead."
            )
            return cache_data.get("minions", {})

        prow = None
        proxies = []

        # Rows are ordered by server id with the proxy chain in reverse path
        # position; emit one roster entry whenever the server id changes.
        row = h.fetchone()
        while True:
            if prow is not None and (row is None or row.server_id != prow.server_id):
                ret[prow.minion_id] = self._get_ssh_minion(
                    minion_id=prow.minion_id,
                    proxies=proxies,
                    tunnel=prow.tunnel,
                    ssh_push_port=int(prow.ssh_push_port),
                )
                # FIX: reset the accumulator only after emitting an entry.
                # Previously it was cleared on EVERY row, so multi-hop proxy
                # chains lost all but the most recently fetched proxy.
                proxies = []
            if row is None:
                break
            if row.proxy_hostname:
                proxies.append(row.proxy_hostname)
            prow = row
            row = h.fetchone()

        self.cache.store(
            "roster/uyuni",
            "minions",
            {"fp": cache_fp, "minions": ret, "config_hash": self.config_hash},
        )

        if log.isEnabledFor(logging.TRACE):
            log.trace("Uyuni DB roster:\n%s", dump(ret))

        return ret


def targets(tgt, tgt_type="glob", **kwargs):
    """
    Return the targets from the Uyuni DB
    """
    # Keep a single UyuniRoster instance per process in __context__ so the
    # DB connection and cache survive across calls.
    if __context__.get("roster.uyuni") is None:
        __context__["roster.uyuni"] = UyuniRoster(
            __opts__.get("postgres"), __opts__.get("uyuni_roster")
        )
    db_roster = __context__["roster.uyuni"]

    return __utils__["roster_matcher.targets"](db_roster.targets(), tgt, tgt_type)
 07070100000012000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000002000000000susemanager-sls/modules/runners   07070100000013000081B400000000000000000000000163F87E30000005ED000000000000000000000000000000000000003600000000susemanager-sls/modules/runners/kiwi-image-collect.py # SUSE Manager
# Copyright (c) 2018--2020 SUSE LLC

# runner to collect image from build host

import os
import logging

log = logging.getLogger(__name__)

def upload_file_from_minion(minion, minion_ip, filetoupload, targetdir):
    # Pull the image from the build host with rsync over ssh, using the
    # cached fqdn grain to address the minion (IP fallback below).
    fqdn = __salt__['cache.grains'](tgt=minion).get(minion, {}).get('fqdn')
    log.info('Collecting image "{}" from minion {} (FQDN: {}, IP: {})'.format(filetoupload, minion, fqdn, minion_ip))
    if not fqdn or fqdn == 'localhost':
        fqdn = minion_ip
    remote_src = 'root@{}:{}'.format(fqdn, filetoupload)
    ssh_command = (
        'ssh -o IdentityFile=/srv/susemanager/salt/salt_ssh/mgr_ssh_id'
        ' -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
    )
    return __salt__['salt.cmd']('rsync.rsync', remote_src, targetdir, rsh=ssh_command)

def move_file_from_minion_cache(minion, filetomove, targetdir):
    # Image was already pushed through the salt fileserver; move it out of
    # the minion cache into the image store.
    cache_root = os.path.join(__opts__['cachedir'], 'minions', minion, 'files')
    src = os.path.join(cache_root, filetomove.lstrip('/'))
    log.info('Collecting image from minion cache "{}"'.format(src))
    # file.move throws an exception in case of error
    return __salt__['salt.cmd']('file.move', src, targetdir)

def kiwi_collect_image(minion, minion_ip, filepath, image_store_dir):
    # Fetch a built Kiwi image from the build host, either from the salt
    # minion cache or directly via rsync, depending on the pillar setting.
    __salt__['salt.cmd']('file.mkdir', image_store_dir)

    minion_pillar = __salt__['cache.pillar'](tgt=minion).get(minion, {})
    if minion_pillar.get('use_salt_transport'):
        return move_file_from_minion_cache(minion, filepath, image_store_dir)

    return upload_file_from_minion(minion, minion_ip, filepath, image_store_dir)
   07070100000014000081B400000000000000000000000163F87E300000082D000000000000000000000000000000000000002A00000000susemanager-sls/modules/runners/mgrk8s.py from salt.exceptions import SaltInvocationError
import logging

log = logging.getLogger(__name__)

try:
    from kubernetes import client, config # pylint: disable=import-self
    from kubernetes.config import new_client_from_config
    from kubernetes.client.rest import ApiException
    from urllib3.exceptions import HTTPError
    IS_VALID = True
except ImportError as ex:
    IS_VALID = False


def __virtual__():
    # Load this runner only when the kubernetes/urllib3 imports at module
    # scope succeeded.
    return IS_VALID


def get_all_containers(kubeconfig=None, context=None):
    '''
    Retrieve information about all containers running in a Kubernetes cluster.

    :param kubeconfig: path to kubeconfig file
    :param context: context inside kubeconfig
    :return:
    .. code-block:: json
       {
            "containers": [
                {
                    "image_id": "(docker-pullable://)?some/image@sha256:hash....",
                    "image": "myregistry/some/image:v1",
                    "container_id": "(docker|cri-o)://...hash...",
                    "pod_name": "kubernetes-pod",
                    "pod_namespace": "pod-namespace"
                }
            }
       }
    '''
    if not kubeconfig:
        raise SaltInvocationError('kubeconfig is mandatory')

    if not context:
        raise SaltInvocationError('context is mandatory')

    api = client.CoreV1Api(new_client_from_config(kubeconfig, context))
    output = {'containers': []}
    for pod in api.list_pod_for_all_namespaces(watch=False).items:
        statuses = pod.status.container_statuses
        if statuses is None:
            log.error("Failed to parse pod container statuses")
            continue
        for container in statuses:
            output['containers'].append({
                'container_id': container.container_id,
                'image': container.image,
                'image_id': container.image_id,
                'pod_name': pod.metadata.name,
                'pod_namespace': pod.metadata.namespace,
            })

    return output
   07070100000015000081B400000000000000000000000163F87E3000001107000000000000000000000000000000000000002B00000000susemanager-sls/modules/runners/mgrutil.py    from subprocess import Popen, PIPE
import logging
import stat
import grp
import shlex
import os
import shutil
import salt.utils

log = logging.getLogger(__name__)

# Group that takes ownership of files collected from minions (used by
# move_minion_uploaded_files below), presumably so the server-side services
# can read them — TODO confirm against the consumers.
GROUP_OWNER = 'susemanager'


def delete_rejected_key(minion):
    '''
    Delete a previously rejected minion key from minions_rejected
    :param minion: the minion id to look for
    :return: map containing retcode and stdout/stderr
    '''
    rejected_dir = "/etc/salt/pki/master/minions_rejected/"
    key_path = os.path.normpath(rejected_dir + minion)
    # Refuse anything that escapes the rejected-keys directory (e.g. "..").
    if not key_path.startswith(rejected_dir):
        return {"retcode": -1, "stderr": "Unexpected path: " + key_path}
    if not os.path.isfile(key_path):
        # Nothing to delete counts as success.
        return {"retcode": 0}
    return _cmd(['rm', key_path])


def ssh_keygen(path):
    '''
    Generate SSH keys using the given path.
    :param path: the path
    :return: map containing retcode and stdout/stderr
    '''
    # Never overwrite an existing key file.
    if os.path.isfile(path):
        return {"retcode": -1, "stderr": "Key file already exists"}
    keygen_cmd = ['ssh-keygen', '-N', '', '-f', path, '-t', 'rsa', '-q']
    return _cmd(keygen_cmd)


def chain_ssh_cmd(hosts=None, clientkey=None, proxykey=None, user="root", options=None, command=None, outputfile=None):
    '''
    Chain ssh calls over one or more hops to run a command on the last host in the chain.
    :param hosts: ordered list of hostnames to hop through
    :param clientkey: identity file for the first hop
    :param proxykey: identity file for all subsequent hops
    :param user: remote user for every hop
    :param options: dict of ssh -o options applied to every hop
    :param command: command executed on the final host
    :param outputfile: optional path; the command's stdout is written there
    :return: map containing retcode and stdout/stderr (from _cmd)
    '''
    # The -o options are identical for every hop: build them once instead of
    # per iteration, and tolerate options=None (previously crashed).
    opts = " ".join("-o {}={}".format(opt, val) for opt, val in (options or {}).items())
    cmd = []
    for idx, hostname in enumerate(hosts or []):
        # First hop authenticates with the client key, later hops with the proxy key.
        key = clientkey if idx == 0 else proxykey
        ssh = "/usr/bin/ssh -i {} {} -o User={} {}"\
            .format(key, opts, user, hostname)
        cmd.extend(shlex.split(ssh))
    # The remote command is passed as a single argument to the last ssh.
    cmd.append(command)
    ret = _cmd(cmd)
    if outputfile:
        with open(outputfile, "w") as out:
            out.write(ret["stdout"])
    return ret

def remove_ssh_known_host(user, hostname):
    '''
    Remove *hostname* from *user*'s SSH known_hosts via the ssh.rm_known_host
    execution module.
    :return: the result of ssh.rm_known_host
    '''
    return __salt__['salt.cmd']('ssh.rm_known_host', user, hostname)


def _cmd(cmd):
    '''
    Run *cmd*, capture its output and return a map with retcode and the
    decoded stdout/stderr text.
    '''
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate()
    return {
        "retcode": proc.returncode,
        "stdout": salt.utils.stringutils.to_unicode(out),
        "stderr": salt.utils.stringutils.to_unicode(err),
    }


def move_minion_uploaded_files(minion=None, dirtomove=None, basepath=None, actionpath=None):
    '''
    Move files a minion uploaded into the salt minion cache to the SCAP
    store directory and hand ownership to the susemanager group.

    :param minion: minion id whose cache is read
    :param dirtomove: directory inside the minion cache to move files from
    :param basepath: root of the SCAP store
    :param actionpath: path below basepath where the files go
    :return: {True: destination path} on success, {False: error message} on failure
    '''
    srcdir = os.path.join(__opts__['cachedir'], "minions", minion, 'files', dirtomove.lstrip('/'))
    scapstorepath = os.path.join(basepath, actionpath)
    susemanager_gid = grp.getgrnam(GROUP_OWNER).gr_gid
    if not os.path.exists(scapstorepath):
        log.debug("Creating action directory: {0}".format(scapstorepath))
        try:
            os.makedirs(scapstorepath)
        except Exception as err:
            log.error('Failed to create dir {0}'.format(scapstorepath), exc_info=True)
            return {False: 'Salt failed to create dir {0}: {1}'.format(scapstorepath, str(err))}
        # change group permissions to rwx and group owner to susemanager
        mode = stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH
        subdirs = actionpath.split('/')
        for idx in range(1, len(subdirs)):
            # FIX: was `subdirs[0: idx] != ''`, which compares a list to a
            # string and is always True; the intent is to skip prefixes made
            # up only of empty components (from a leading '/').
            if any(subdirs[:idx]):
                # ignore errors. If dir has owner != salt then chmod fails but the dir
                # might still have the correct group owner
                try:
                    os.chmod(os.path.join(basepath, *subdirs[:idx]), mode)
                except OSError:
                    pass
                try:
                    os.chown(os.path.join(basepath, *subdirs[:idx]), -1, susemanager_gid)
                except OSError:
                    pass

    try:
        # move the files to the scap store dir
        for fl in os.listdir(srcdir):
            shutil.move(os.path.join(srcdir, fl), scapstorepath)
        # change group owner to susemanager
        for fl in os.listdir(scapstorepath):
            os.chown(os.path.join(scapstorepath, fl), -1, susemanager_gid)
    except Exception as err:
        log.error('Salt failed to move {0} -> {1}'.format(srcdir, scapstorepath), exc_info=True)
        return {False: str(err)}
    return {True: scapstorepath}

 07070100000016000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001D00000000susemanager-sls/modules/tops  07070100000017000081B400000000000000000000000163F87E30000004F3000000000000000000000000000000000000003000000000susemanager-sls/modules/tops/mgr_master_tops.py   # -*- coding: utf-8 -*-
'''
SUSE Manager master_tops module
-------------------------------

This module provides the base states top information from SUSE Manager.

The top information returned by this module is merged by Salt with the
user custom data provided in /srv/salt/top.sls file.

.. code-block:: yaml

    master_tops:
      mgr_master_tops: True
'''

# Import python libs
from __future__ import absolute_import
import logging

# Define the module's virtual name
__virtualname__ = 'mgr_master_tops'

log = logging.getLogger(__name__)

# State files applied to every minion in the "base" environment; Salt merges
# this list with any user-provided /srv/salt/top.sls (see module docstring).
MANAGER_BASE_TOP = [
    "channels",
    "certs",
    "packages",
    "custom",
    "custom_groups",
    "custom_org",
    "formulas",
    "services.salt-minion",
    "services.docker",
    "services.kiwi-image-server",
    "ansible"
]


def __virtual__():
    '''
    Always load this master_tops module under its virtual name
    ("mgr_master_tops").
    '''
    return __virtualname__


def top(**kwargs):
    '''
    Returns the SUSE Manager top state information of a minion
    for the `base` salt environment.
    '''
    # "environment" takes precedence, "saltenv" is the fallback key.
    opts = kwargs['opts']
    env = opts.get('environment') or opts.get('saltenv')
    if env is not None and env != "base":
        # Only the base environment is managed by SUSE Manager.
        return None
    log.debug('Loading SUSE Manager TOP state information for the "base" environment')
    return {"base": MANAGER_BASE_TOP}
 07070100000018000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001C00000000susemanager-sls/pillar_data   07070100000019000081B400000000000000000000000163F87E30000001A2000000000000000000000000000000000000002800000000susemanager-sls/pillar_data/gpgkeys.yml   gpgkeys:
  res6tools:
    name: gpg-pubkey-307e3d54
    file: sle11-gpg-pubkey-307e3d54.key
  res7tools:
    name: gpg-pubkey-39db7c82
    file: sle12-gpg-pubkey-39db7c82.key
  res8tools:
    name: gpg-pubkey-39db7c82
    file: sle12-gpg-pubkey-39db7c82.key
  res:
    name: gpg-pubkey-0182b964
    file: res-gpg-pubkey-0182b964.key
  ubuntutools:
    name: gpg-pubkey-39db7c82
    file: sle12-gpg-pubkey-39db7c82.key
  0707010000001A000081B400000000000000000000000163F87E300000139D000000000000000000000000000000000000001900000000susemanager-sls/pylintrc  # susemanager-sls package pylint configuration

[MASTER]

# Profiled execution.
profile=no

# Pickle collected data for later comparisons.
persistent=no


[MESSAGES CONTROL]

# Disable the message(s) with the given id(s).


disable=I0011,
	C0302,
	C0111,
	R0801,
	R0902,
	R0903,
	R0904,
	R0912,
	R0913,
	R0914,
	R0915,
	R0921,
	R0922,
	W0142,
	W0403,
	W0603,
	C1001,
	W0121,
	useless-else-on-loop,
	bad-whitespace,
	unpacking-non-sequence,
	superfluous-parens,
	cyclic-import,
	redefined-variable-type,
	no-else-return,

        # Uyuni disabled
	E0203,
	E0611,
	E1101,
	E1102

# list of disabled messages:
#I0011: 62: Locally disabling R0201
#C0302:  1: Too many lines in module (2425)
#C0111:  1: Missing docstring
#R0902: 19:RequestedChannels: Too many instance attributes (9/7)
#R0903:  Too few public methods
#R0904: 26:Transport: Too many public methods (22/20)
#R0912:171:set_slots_from_cert: Too many branches (59/20)
#R0913:101:GETServer.__init__: Too many arguments (11/10)
#R0914:171:set_slots_from_cert: Too many local variables (38/20)
#R0915:171:set_slots_from_cert: Too many statements (169/50)
#W0142:228:MPM_Package.write: Used * or ** magic
#W0403: 28: Relative import 'rhnLog', should be 'backend.common.rhnLog'
#W0603: 72:initLOG: Using the global statement
# for pylint-1.0 we also disable
#C1001: 46, 0: Old-style class defined. (old-style-class)
#W0121: 33,16: Use raise ErrorClass(args) instead of raise ErrorClass, args. (old-raise-syntax)
#W:243, 8: Else clause on loop without a break statement (useless-else-on-loop)
# pylint-1.1 checks
#C:334, 0: No space allowed after bracket (bad-whitespace)
#W:162, 8: Attempting to unpack a non-sequence defined at line 6 of (unpacking-non-sequence)
#C: 37, 0: Unnecessary parens after 'not' keyword (superfluous-parens)
#C:301, 0: Unnecessary parens after 'if' keyword (superfluous-parens)

[REPORTS]

# Set the output format. Available formats are text, parseable, colorized, msvs
# (visual studio) and html
output-format=parseable

# Include message's id in output
include-ids=yes

# Tells whether to display a full report or only the messages
reports=yes

# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
msg-template="{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}"

[VARIABLES]

# A regular expression matching names used for dummy variables (i.e. not used).
dummy-variables-rgx=_|dummy


[BASIC]

# Regular expression which should only match correct module names
#module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
module-rgx=([a-zA-Z_][a-zA-Z0-9_]+)$

# Regular expression which should only match correct module level names
const-rgx=(([a-zA-Z_][a-zA-Z0-9_]*)|(__.*__))$

# Regular expression which should only match correct class names
class-rgx=[a-zA-Z_][a-zA-Z0-9_]+$

# Regular expression which should only match correct function names
function-rgx=[a-z_][a-zA-Z0-9_]{,42}$

# Regular expression which should only match correct method names
method-rgx=[a-z_][a-zA-Z0-9_]{,42}$

# Regular expression which should only match correct instance attribute names
attr-rgx=[a-z_][a-zA-Z0-9_]{,30}$

# Regular expression which should only match correct argument names
argument-rgx=[a-z_][a-zA-Z0-9_]{,30}$

# Regular expression which should only match correct variable names
variable-rgx=[a-z_][a-zA-Z0-9_]{,30}$

# Regular expression which should only match correct list comprehension /
# generator expression variable names
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$

# Regular expression which should only match correct class attribute names
class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,42}|(__.*__))$

# Good variable names which should always be accepted, separated by a comma
good-names=i,j,k,ex,Run,_

# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata

# List of builtins function names that should not be used, separated by a comma
bad-functions=apply,input


[DESIGN]

# Maximum number of arguments for function / method
max-args=10

# Maximum number of locals for function / method body
max-locals=20

# Maximum number of return / yield for function / method body
max-returns=6

# Maximum number of branch for function / method body
max-branchs=20

# Maximum number of statements in function / method body
max-statements=50

# Maximum number of parents for a class (see R0901).
max-parents=7

# Maximum number of attributes for a class (see R0902).
max-attributes=7

# Minimum number of public methods for a class (see R0903).
min-public-methods=1

# Maximum number of public methods for a class (see R0904).
max-public-methods=20


[CLASSES]


[FORMAT]

# Maximum number of characters on a single line.
max-line-length=120

# Maximum number of lines in a module
max-module-lines=1000

# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string='    '


[MISCELLANEOUS]

# List of note tags to take in consideration, separated by a comma.
notes=
   0707010000001B000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001800000000susemanager-sls/reactor   0707010000001C000081B400000000000000000000000163F87E300000008A000000000000000000000000000000000000003000000000susemanager-sls/reactor/resume_action_chain.sls   resume_actionchain_execution:
  local.mgractionchains.resume:
    - tgt: {{ data['id'] }}
    - metadata:
        suma-action-chain: True
  0707010000001D000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001900000000susemanager-sls/salt-ssh  0707010000001E000081B400000000000000000000000163F87E3000002BC6000000000000000000000000000000000000002600000000susemanager-sls/salt-ssh/preflight.sh #!/bin/bash

# In case the script is executed using different interpreter than bash
# then we call the script explicitely using bash
SHPATH=$(readlink /proc/$$/exe)
if ! [ "$SHPATH" = "/bin/bash" -o "$SHPATH" = "/usr/bin/bash" ]; then
  exec bash "$0" "$@"
fi

if [ $# -lt 2 ]; then
    echo "Error: Wrong number of arguments!"
    exit 255
fi

# In case the script is executed using different interpreter than bash
# then we call the script explicitely using bash
SH_PATH=$(readlink /proc/$$/exe)
SH_NAME=$(basename "${SH_PATH}")
if ! [ "${SH_NAME}" = "bash" ]; then
  exec bash "$0" "$@"
fi

REPO_HOST=$1
REPO_PORT=$2
FAIL_ON_ERROR=1
if [ "$3" = "1" ]; then
    FAIL_ON_ERROR=0
fi
BOOTSTRAP=0
if [ "$4" = "1" ]; then
    BOOTSTRAP=1
fi
if [ ${BOOTSTRAP} -eq 1 ] && [ ${REPO_PORT} -ne 443 ]; then
    REPO_HOST="localhost"
fi
CLIENT_REPOS_ROOT="https://${REPO_HOST}:${REPO_PORT}/pub/repositories"

VENV_INST_DIR="/usr/lib/venv-salt-minion"
VENV_TMP_DIR="/var/tmp/venv-salt-minion"
VENV_HASH_FILE="venv-hash.txt"

TEMP_DIR=$(mktemp -d -t salt-bundle-XXXXXXXXXX)
trap "popd > /dev/null; rm -rf ${TEMP_DIR}" EXIT
pushd "${TEMP_DIR}" > /dev/null

# Print message $1 to stderr and exit with code $2 — unless best-effort mode
# is active (FAIL_ON_ERROR=0), in which case exit 0 so the caller does not
# treat the failure as fatal.
function exit_with_message_code() {
    echo "$1" >&2
    if [ ${FAIL_ON_ERROR} -ne 0 ]; then
        exit $2
    fi
    exit 0
}

# Pick the package manager. dnf-based systems are handled through the yum
# code paths, so both map to INSTALLER=yum here.
if [ -x /usr/bin/dnf ]; then
    INSTALLER=yum
elif [ -x /usr/bin/zypper ]; then
    INSTALLER=zypper
elif [ -x /usr/bin/yum ]; then
    INSTALLER=yum
elif [ -x /usr/bin/apt ]; then
    INSTALLER=apt
else
    exit_with_message_code "Error: Unable to detect installer on the OS!" 1
fi

# Pick the download tool. The probe run checks whether the "skip TLS
# verification" switch is understood by this wget/curl build; if it is not,
# fall back to the variant without it.
if [ -x /usr/bin/wget ]; then
    output=`LANG=en_US /usr/bin/wget --no-check-certificate 2>&1`
    error=`echo $output | grep "unrecognized option"`
    if [ -z "$error" ]; then
        FETCH="/usr/bin/wget -nv -r -nd --no-check-certificate"
    else
        FETCH="/usr/bin/wget -nv -r -nd"
    fi
elif [ -x /usr/bin/curl ]; then
    output=`LANG=en_US /usr/bin/curl -k 2>&1`
    error=`echo $output | grep "is unknown"`
    if [ -z "$error" ]; then
        FETCH="/usr/bin/curl -ksSOf"
    else
        FETCH="/usr/bin/curl -sSOf"
    fi
else
    exit_with_message_code "Error: To be able to download files, please install either 'wget' or 'curl'" 2
fi

# Determine the package architecture with the native packaging tool.
if [ "$INSTALLER" == "zypper" ] || [ "$INSTALLER" == "yum" ]; then
    ARCH=$(rpm --eval "%{_arch}")
else
    ARCH=$(dpkg --print-architecture)
fi

# Detect the distribution base and major version on yum/dnf-based systems.
# Sets Y_CLIENT_CODE_BASE / Y_CLIENT_CODE_VERSION ("unknown" when the
# release files give no answer); these feed into the bootstrap repo URL.
function getY_CLIENT_CODE_BASE() {
    local BASE=""
    local VERSION=""
    # SLES ES6 is a special case; it will install a symlink named
    # centos-release pointing to redhat-release which will make the
    # original test fail; reverting the checks does not help as this
    # will break genuine CentOS systems. So use the poor man's approach
    # to detect this special case. SLES ES7 does not have this issue
    # https://bugzilla.suse.com/show_bug.cgi?id=1132576
    # https://bugzilla.suse.com/show_bug.cgi?id=1152795
    if [ -L /usr/share/doc/sles_es-release ]; then
        BASE="res"
        VERSION=6
    elif [ -f /etc/almalinux-release ]; then
        grep -v '^#' /etc/almalinux-release | grep -q '\(AlmaLinux\)' && BASE="almalinux"
        VERSION=`grep -v '^#' /etc/almalinux-release | grep -Po '(?<=release )\d+'`
    elif [ -f /etc/rocky-release ]; then
        grep -v '^#' /etc/rocky-release | grep -q '\(Rocky Linux\)' && BASE="rockylinux"
        VERSION=`grep -v '^#' /etc/rocky-release | grep -Po '(?<=release )\d+'`
    elif [ -f /etc/oracle-release ]; then
        grep -v '^#' /etc/oracle-release | grep -q '\(Oracle\)' && BASE="oracle"
        VERSION=`grep -v '^#' /etc/oracle-release | grep -Po '(?<=release )\d+'`
    elif [ -f /etc/alinux-release ]; then
        grep -v '^#' /etc/alinux-release | grep -q '\(Alibaba\)' && BASE="alibaba"
        VERSION=`grep -v '^#' /etc/alinux-release | grep -Po '(?<=release )\d+'`
    elif [ -f /etc/centos-release ]; then
        grep -v '^#' /etc/centos-release | grep -q '\(CentOS\)' && BASE="centos"
        VERSION=`grep -v '^#' /etc/centos-release | grep -Po '(?<=release )\d+'`
    elif [ -f /etc/redhat-release ]; then
        grep -v '^#' /etc/redhat-release | grep -q '\(Red Hat\)' && BASE="res"
        VERSION=`grep -v '^#' /etc/redhat-release | grep -Po '(?<=release )\d+'`
    elif [ -f /etc/os-release ]; then
        # Generic fallback: read ID / VERSION_ID in a subshell so the
        # sourced variables do not leak into this script.
        BASE=$(source /etc/os-release; echo $ID)
        VERSION=$(source /etc/os-release; echo $VERSION_ID)
    fi
    Y_CLIENT_CODE_BASE="${BASE:-unknown}"
    Y_CLIENT_CODE_VERSION="${VERSION:-unknown}"
}

function getZ_CLIENT_CODE_BASE() {
    # Detect the SUSE flavour for zypper-based clients and export the
    # result as Z_CLIENT_CODE_BASE ('sle', 'opensuse' or 'unknown'),
    # Z_CLIENT_CODE_VERSION (major version or 'unknown') and
    # Z_CLIENT_CODE_PATCHLEVEL (defaults to 0 when absent).
    local BASE=""
    local VERSION=""
    local PATCHLEVEL=""
    if [ -r /etc/SuSE-release ]; then
        # Legacy release file (older SLE / openSUSE releases).
        grep -q 'Enterprise' /etc/SuSE-release && BASE='sle'
        # Import VERSION and PATCHLEVEL exactly as declared in the file;
        # blanks are stripped so "VERSION = 12" evaluates as VERSION=12.
        eval $(grep '^\(VERSION\|PATCHLEVEL\)' /etc/SuSE-release | tr -d '[:blank:]')
        if [ "$BASE" != "sle" ]; then
            grep -q 'openSUSE' /etc/SuSE-release && BASE='opensuse'
            # openSUSE encodes both numbers in VERSION (e.g. "13.2"):
            # major before the dot, patch level after it.
            VERSION="$(grep '^\(VERSION\)' /etc/SuSE-release | tr -d '[:blank:]' | sed -n 's/.*=\([[:digit:]]\+\).*/\1/p')"
            PATCHLEVEL="$(grep '^\(VERSION\)' /etc/SuSE-release | tr -d '[:blank:]' | sed -n 's/.*\.\([[:digit:]]*\).*/\1/p')"
        fi
    elif [ -r /etc/os-release ]; then
        # Modern systems: derive everything from os-release.
        grep -q 'Enterprise' /etc/os-release && BASE='sle'
        if [ "$BASE" != "sle" ]; then
            grep -q 'openSUSE' /etc/os-release && BASE='opensuse'
        fi
        # VERSION_ID looks like "15.4": major before the dot, patch level after.
        VERSION="$(grep '^\(VERSION_ID\)' /etc/os-release | sed -n 's/.*"\([[:digit:]]\+\).*/\1/p')"
        PATCHLEVEL="$(grep '^\(VERSION_ID\)' /etc/os-release | sed -n 's/.*\.\([[:digit:]]*\).*/\1/p')"
    fi
    Z_CLIENT_CODE_BASE="${BASE:-unknown}"
    Z_CLIENT_CODE_VERSION="${VERSION:-unknown}"
    # No patch level means GA, reported as 0.
    Z_CLIENT_CODE_PATCHLEVEL="${PATCHLEVEL:-0}"
}

function getA_CLIENT_CODE_BASE() {
    # Detect apt-based distributions from /etc/os-release and export
    # A_CLIENT_CODE_BASE, A_CLIENT_CODE_MAJOR_VERSION,
    # A_CLIENT_CODE_MINOR_VERSION (Ubuntu only) and A_CLIENT_VARIANT_ID.
    local BASE=""
    local VERSION=""
    local VARIANT_ID=""

    if [ -f /etc/os-release ]; then
        # Source in a subshell so the os-release variables do not leak
        # into this script's environment.
        BASE=$(source /etc/os-release; echo $ID)
        VERSION=$(source /etc/os-release; echo $VERSION_ID)
        VARIANT_ID=$(source /etc/os-release; echo $VARIANT_ID)
    fi
    A_CLIENT_CODE_BASE="${BASE:-unknown}"
    # Intentionally unquoted so word splitting builds the array.
    local VERCOMPS=(${VERSION/\./ }) # split into an array 18.04 -> (18 04)
    A_CLIENT_CODE_MAJOR_VERSION=${VERCOMPS[0]}
    # Ubuntu only
    if [ "${BASE}" == "ubuntu" ]; then
        A_CLIENT_CODE_MINOR_VERSION=$((${VERCOMPS[1]} + 0)) # convert "04" -> 4
    fi
    A_CLIENT_VARIANT_ID="${VARIANT_ID:-unknown}"
}

# Build CLIENT_REPO_URL for the detected package manager / distribution.
if [ "${INSTALLER}" = "yum" ]; then
    getY_CLIENT_CODE_BASE
    CLIENT_REPO_URL="${CLIENT_REPOS_ROOT}/${Y_CLIENT_CODE_BASE}/${Y_CLIENT_CODE_VERSION}/bootstrap"
    # In case of Red Hat derivatives, check if bootstrap repository is available, if not, fallback to RES.
    if [ "$Y_CLIENT_CODE_BASE" == almalinux ] || \
      [ "$Y_CLIENT_CODE_BASE" == rockylinux ] || \
      [ "$Y_CLIENT_CODE_BASE" == oracle ] || \
      [ "$Y_CLIENT_CODE_BASE" == alibaba ] || \
      [ "$Y_CLIENT_CODE_BASE" == centos ]; then
        # Probe the derivative-specific repo; any fetch failure selects the fallback.
        $FETCH $CLIENT_REPO_URL/repodata/repomd.xml &> /dev/null
        if [ $? -ne 0 ]; then
            CLIENT_REPO_URL="${CLIENT_REPOS_ROOT}/res/${Y_CLIENT_CODE_VERSION}/bootstrap"
        fi
    fi
elif [ "${INSTALLER}" = "zypper" ]; then
    getZ_CLIENT_CODE_BASE
    # SUSE repos are additionally namespaced by patch level.
    CLIENT_REPO_URL="${CLIENT_REPOS_ROOT}/${Z_CLIENT_CODE_BASE}/${Z_CLIENT_CODE_VERSION}/${Z_CLIENT_CODE_PATCHLEVEL}/bootstrap"
elif [ "${INSTALLER}" = "apt" ]; then
    getA_CLIENT_CODE_BASE
    if [ "${A_CLIENT_CODE_BASE}" == "debian" ] || [ "${A_CLIENT_CODE_BASE}" == "raspbian" ]; then
        # Debian/Raspbian repos use only the major version.
        CLIENT_REPO_URL="${CLIENT_REPOS_ROOT}/${A_CLIENT_CODE_BASE}/${A_CLIENT_CODE_MAJOR_VERSION}/bootstrap"
    elif [ "${A_CLIENT_CODE_BASE}" == "astra" ]; then
        # Astra repos are keyed by the os-release VARIANT_ID.
        CLIENT_REPO_URL="${CLIENT_REPOS_ROOT}/${A_CLIENT_CODE_BASE}/${A_CLIENT_VARIANT_ID}/bootstrap"
    else
        # Ubuntu and the rest: major/minor path components.
        CLIENT_REPO_URL="${CLIENT_REPOS_ROOT}/${A_CLIENT_CODE_BASE}/${A_CLIENT_CODE_MAJOR_VERSION}/${A_CLIENT_CODE_MINOR_VERSION}/bootstrap"
    fi
fi

# Decide whether the Salt bundle (venv-salt-minion) can be used and where to
# take it from: the bootstrap repository, or an already installed rpm/dpkg
# package. On a hash change the bundle is (re)extracted into VENV_TMP_DIR
# and relocated so it runs from there.
VENV_FILE="venv-enabled-${ARCH}.txt"
VENV_ENABLED_URL="${CLIENT_REPO_URL}/${VENV_FILE}"
$FETCH $VENV_ENABLED_URL > /dev/null 2>&1

if [ -f "${VENV_FILE}" ]; then
    VENV_SOURCE="bootstrap"
else
    # No marker file in the repo: fall back to a locally installed package.
    if [ "${INSTALLER}" = "apt" ] && dpkg-query -s venv-salt-minion > /dev/null 2>&1 && [ -d "${VENV_INST_DIR}" ]; then
        VENV_SOURCE="dpkg"
    elif rpm -q --quiet venv-salt-minion 2> /dev/null && [ -d "${VENV_INST_DIR}" ]; then
        VENV_SOURCE="rpm"
    fi
fi

if [ -n "${VENV_SOURCE}" ]; then
    if [ "${VENV_SOURCE}" = "bootstrap" ]; then
        # Marker file format: "<hash> <relative-package-path>".
        VENV_ENABLED=$(cat "${VENV_FILE}")
        VENV_HASH=$(echo "${VENV_ENABLED}" | sed 's/ .*//')
        VENV_PKG_PATH=$(echo "${VENV_ENABLED}" | sed 's/^.* //')
        if [ -z "${VENV_HASH}" ] || [ -z "${VENV_PKG_PATH}" ]; then
            exit_with_message_code "Error: File ${CLIENT_REPO_URL}/${VENV_FILE} is malformed!" 4
        fi
    elif [ "${VENV_SOURCE}" = "rpm" ]; then
        # Derive a change-detection hash from the installed package metadata.
        VENV_HASH=$(rpm -qi venv-salt-minion | sha256sum | tr -d '\- ')
    elif [ "${VENV_SOURCE}" = "dpkg" ]; then
        VENV_HASH=$(dpkg -s venv-salt-minion | sha256sum | tr -d '\- ')
    fi
    # Reuse a previously extracted bundle only when it is still intact.
    if [ -f "${VENV_TMP_DIR}/${VENV_HASH_FILE}" ]; then
        if [ -x "${VENV_TMP_DIR}/bin/python" ]; then
            PRE_VENV_HASH=$(cat "${VENV_TMP_DIR}/${VENV_HASH_FILE}")
        else
            rm -f "${VENV_TMP_DIR}/${VENV_HASH_FILE}"
        fi
    fi
    if [ "${VENV_HASH}" != "${PRE_VENV_HASH}" ]; then
        if [ "${VENV_SOURCE}" = "bootstrap" ]; then
            VENV_PKG_URL="${CLIENT_REPO_URL}/${VENV_PKG_PATH}"
            $FETCH $VENV_PKG_URL > /dev/null 2>&1
            VENV_PKG_FILE=$(basename "${VENV_PKG_PATH}")
            # A failed download is fatal only when no previous bundle exists.
            if [ ! -f "${VENV_PKG_FILE}" ] && [ -z "${PRE_VENV_HASH}" ]; then
                exit_with_message_code "Error: Unable to download $VENV_PKG_URL file!" 5
            fi
        fi
        rm -rf "${VENV_TMP_DIR}"
        if [ "${VENV_SOURCE}" = "bootstrap" ]; then
            mkdir -p "${VENV_TMP_DIR}"
            pushd "${VENV_TMP_DIR}" > /dev/null
            if [ "${VENV_PKG_FILE##*\.}" = "deb" ]; then
                # Unpack the .deb and drop everything except the bundle payload.
                dpkg-deb -x "${TEMP_DIR}/${VENV_PKG_FILE}" .
                rm -rf etc lib var usr/bin usr/sbin usr/share usr/lib/tmpfiles.d
            else
                # Extract only the bundle payload from the rpm.
                rpm2cpio "${TEMP_DIR}/${VENV_PKG_FILE}" | cpio -idm '*/lib/venv-salt-minion/*' >> /dev/null 2>&1
            fi
            # Flatten usr/lib/venv-salt-minion/* into VENV_TMP_DIR itself.
            mv usr usr.tmp
            mv usr.tmp/lib/venv-salt-minion/* .
            rm -rf usr.tmp
            if [ ! -x bin/python ]; then
                rm -f "${VENV_TMP_DIR}/${VENV_HASH_FILE}"
                exit_with_message_code "Error: Unable to extract the bundle from ${TEMP_DIR}/${VENV_PKG_FILE}!" 6
            fi
        else
            # Copy the installed bundle, then relocate it below VENV_TMP_DIR.
            cp -r "${VENV_INST_DIR}" "${VENV_TMP_DIR}"
            pushd "${VENV_TMP_DIR}" > /dev/null
        fi
        # Rewrite shebangs and the interpreter path so the bundle runs
        # from VENV_TMP_DIR instead of its install location.
        grep -m1 -r "^#\!${VENV_INST_DIR}" bin/ | sed 's/:.*//' | sort | uniq | xargs -I '{}' sed -i "1s=^#!${VENV_INST_DIR}/bin/.*=#!${VENV_TMP_DIR}/bin/python=" {}
        sed -i "s#${VENV_INST_DIR}#${VENV_TMP_DIR}#g" bin/python
        popd > /dev/null
        echo "${VENV_HASH}" > "${VENV_TMP_DIR}/${VENV_HASH_FILE}"
    fi
else
    # No bundle available anywhere: fatal only when nothing was extracted before.
    if [ ! -f "${VENV_TMP_DIR}/${VENV_HASH_FILE}" ]; then
        exit_with_message_code "Error: Unable to download ${CLIENT_REPO_URL}/${VENV_FILE} file!" 3
    fi
fi
  0707010000001F000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001500000000susemanager-sls/salt  07070100000020000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000002200000000susemanager-sls/salt/actionchains 07070100000021000081B400000000000000000000000163F87E3000000412000000000000000000000000000000000000003A00000000susemanager-sls/salt/actionchains/force_restart_minion.sh #!/bin/bash
# Restart the (venv-)salt-minion service if it was not restarted by the
# patch installation. Service start time is compared against "now".
if [ "$(readlink /proc/1/exe)" = "/sbin/init" ]; then
   # SysV, use pid ctime as service start time
   SALT_MINION_NAME="salt-minion"
   SALT_MINION_PID="/var/run/salt-minion.pid"
   # Prefer the Salt bundle service when its pid file exists.
   if [ -f /var/run/venv-salt-minion.pid ]; then
       SALT_MINION_NAME="venv-salt-minion"
       SALT_MINION_PID="/var/run/venv-salt-minion.pid"
   fi
   T0=$(stat -c '%Z' "$SALT_MINION_PID")
   RESTART_MINION="/usr/sbin/rc$SALT_MINION_NAME restart"
else
   # systemd
   SALT_MINION_NAME="salt-minion"
   if systemctl status venv-salt-minion > /dev/null 2>&1; then
       SALT_MINION_NAME="venv-salt-minion"
   fi
   # ActiveEnterTimestamp = when the unit last became active.
   TIME=$(systemctl show "$SALT_MINION_NAME" --property=ActiveEnterTimestamp)
   TIME="${TIME//ActiveEnterTimestamp=/}"
   T0=$(date -d "$TIME" '+%s')
   RESTART_MINION="systemctl restart $SALT_MINION_NAME"
fi

T1=$(date '+%s')
echo "salt-minion service uptime: $(( T1-T0 )) seconds"
# More than 5 seconds of uptime means the patch did not restart the
# service itself, so force the restart now.
if (( (T1-T0) > 5 )); then
   echo "Patch to update salt-minion was installed but service was not restarted. Forcing restart."
   $RESTART_MINION
fi
  07070100000022000081B400000000000000000000000163F87E3000000119000000000000000000000000000000000000003000000000susemanager-sls/salt/actionchains/resumessh.sls   resumessh:
    # Resume a previously interrupted action chain on this minion,
    # after custom states/modules have been synced (see include below).
    mgrcompat.module_run:
    -   name: mgractionchains.resume
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_modules
{%- else %}
      - mgrcompat: sync_modules
{%- endif %}

include:
  - util.synccustomall
   07070100000023000081B400000000000000000000000163F87E3000000151000000000000000000000000000000000000002F00000000susemanager-sls/salt/actionchains/startssh.sls    startssh:
    # Start the action chain identified by the 'actionchain_id' pillar,
    # after custom states/modules have been synced (see include below).
    mgrcompat.module_run:
    -   name: mgractionchains.start
    -   actionchain_id: {{ pillar.get('actionchain_id')}}
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_modules
{%- else %}
      - mgrcompat: sync_modules
{%- endif %}

include:
  - util.synccustomall
   07070100000024000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001D00000000susemanager-sls/salt/ansible  07070100000025000081B400000000000000000000000163F87E300000010F000000000000000000000000000000000000002600000000susemanager-sls/salt/ansible/init.sls # Ansible Control Node prerequisities state
#
# Copyright (c) 2017 - 2021 SUSE LLC

{# Install the ansible package only when this system is entitled as an
   Ansible control node (addon_group_types pillar). #}
{% if pillar['addon_group_types'] is defined and 'ansible_control_node' in pillar['addon_group_types'] %}
mgr_ansible_installed:
  pkg.installed:
    - pkgs:
      - ansible

{% endif %}
 07070100000026000081B400000000000000000000000163F87E3000000290000000000000000000000000000000000000002D00000000susemanager-sls/salt/ansible/runplaybook.sls  #
# SLS to trigger a playbook execution on an Ansible control node
#
# This SLS requires pillar data to render properly.
#
# Example (inventory is optional):
#
# pillar = {
#   "playbook_path": "/root/ansible-examples/lamp_simple/site.yml",
#   "rundir": "/root/ansible-examples/lamp_simple",
#   "inventory_path": "/root/ansible-examples/lamp_simple/hosts"
# }
#

{# Requires pillar keys: playbook_path, rundir, flush_cache
   (all accessed unconditionally); inventory_path is optional. #}
run_ansible_playbook:
  ansible.playbooks:
    - name: {{ pillar["playbook_path"] }}
    - rundir: {{ pillar["rundir"] }}
    - ansible_kwargs:
        flush_cache: {{ pillar["flush_cache"] }}
{%- if "inventory_path" in pillar %}
        inventory: {{ pillar["inventory_path"] }}
{% endif %}
07070100000027000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000002000000000susemanager-sls/salt/bootloader   07070100000028000081FD00000000000000000000000163F87E3000000235000000000000000000000000000000000000003900000000susemanager-sls/salt/bootloader/42_uyuni_reinstall.templ  #!/bin/sh
# grub.d helper (rendered as a Jinja template): prints a menu entry that
# boots the reinstallation kernel/initrd placed in /boot by autoinstall.sls.
set -e

# Provided by grub2; defines make_system_path_relative_to_its_root.
. "$pkgdatadir/grub-mkconfig_lib"

rel_dirname=`make_system_path_relative_to_its_root /boot`

echo "menuentry \"{{ pillar.get('uyuni-reinstall-name') }}\" {"
# Use the EFI load commands only when booted via EFI with linuxefi enabled.
if [ -d /sys/firmware/efi ] && [ "x${GRUB_USE_LINUXEFI}" = "xtrue" ]; then
    echo "    linuxefi ${rel_dirname}/uyuni-reinstall-kernel {{ pillar.get('kopts') }}"
    echo "    initrdefi ${rel_dirname}/uyuni-reinstall-initrd"
else
    echo "    linux ${rel_dirname}/uyuni-reinstall-kernel {{ pillar.get('kopts') }}"
    echo "    initrd ${rel_dirname}/uyuni-reinstall-initrd"
fi
echo "}"

   07070100000029000081B400000000000000000000000163F87E3000000A8F000000000000000000000000000000000000003000000000susemanager-sls/salt/bootloader/autoinstall.sls   {% if pillar['kernel'] and pillar['initrd'] %}
{# Copy the reinstallation kernel and initrd into /boot so the bootloader
   entry created below can chain into the automated installation. #}
mgr_copy_kernel:
  file.managed:
    - name: /boot/uyuni-reinstall-kernel
    - source: salt://bootloader/{{ pillar.get('kernel') }}

mgr_copy_initrd:
  file.managed:
    - name: /boot/uyuni-reinstall-initrd
    - source: salt://bootloader/{{ pillar.get('initrd') }}

{# Determine the bootloader: prefer LOADER_TYPE from /etc/sysconfig/bootloader;
   otherwise probe for the legacy grub1/elilo tools, but only when grub2 is
   NOT installed. Note: the previous probe used `[ !$(which ...) ]`, which is
   a one-argument string test ("!..." is always non-empty, i.e. always true),
   so grub2's presence never excluded grub1/elilo; use -n/-z tests instead. #}
{% set loader_type = salt['cmd.run']('if [ -f /etc/sysconfig/bootloader ]; then source /etc/sysconfig/bootloader 2> /dev/null; fi;
if [ -z "${LOADER_TYPE}" ]; then
if [ -n "$(which grubonce 2> /dev/null)" ] && [ -z "$(which grub2-mkconfig 2> /dev/null)" ]; then LOADER_TYPE="grub";
elif [ -n "$(which elilo 2> /dev/null)" ] && [ -z "$(which grub2-mkconfig 2> /dev/null)" ]; then LOADER_TYPE="elilo";
fi;
fi; echo "${LOADER_TYPE}"', python_shell=True) %}
{# Create the one-shot boot entry for the detected loader, then schedule
   a reboot. Each branch wires its own require/onchanges chain. #}
{% if loader_type == 'grub' %}
{# grub1: append a menu.lst stanza and arm it for a single boot. #}
mgr_create_grub_entry:
  file.append:
    - name: /boot/grub/menu.lst
    - template: jinja
    - source: salt://bootloader/grub1_uyuni_reinstall.templ
    - require:
      - file: mgr_copy_kernel
      - file: mgr_copy_initrd

mgr_grub_boot_once:
  cmd.run:
    - name: grubonce "{{ pillar.get('uyuni-reinstall-name') }}"
    - onchanges:
      - file: mgr_create_grub_entry
{% elif loader_type == 'elilo' %}
{# elilo: append a stanza, make it the default and rewrite the EFI config. #}
mgr_create_elilo_entry:
  file.append:
    - name: /etc/elilo.conf
    - template: jinja
    - source: salt://bootloader/elilo_uyuni_reinstall.templ
    - require:
      - file: mgr_copy_kernel
      - file: mgr_copy_initrd

mgr_set_default_boot:
  file.replace:
    - name: /etc/elilo.conf
    - pattern: default = .*
    - repl: default = {{ pillar.get('uyuni-reinstall-name') }}
    - require:
      - file: mgr_create_elilo_entry

mgr_elilo_copy_config:
  cmd.run:
    - name: elilo
    - onchanges:
      - file: mgr_create_elilo_entry
      - file: mgr_set_default_boot
{% else %}
{# grub2 (default): install a grub.d snippet, point GRUB_DEFAULT at it
   and regenerate grub.cfg. #}
mgr_create_grub2_entry:
  file.managed:
    - name: /etc/grub.d/42_uyuni_reinstall
    - source: salt://bootloader/42_uyuni_reinstall.templ
    - template: jinja
    - mode: 0755

mgr_set_default_boot:
  file.replace:
    - name: /etc/default/grub
    - pattern: GRUB_DEFAULT=.*
    - repl: GRUB_DEFAULT={{ pillar.get('uyuni-reinstall-name') }}
    - require:
      - file: mgr_create_grub2_entry

mgr_generate_grubconf:
  cmd.run:
    - name: grub2-mkconfig -o /boot/grub2/grub.cfg
    - onchanges:
      - file: mgr_copy_kernel
      - file: mgr_copy_initrd
      - file: mgr_create_grub2_entry
      - file: mgr_set_default_boot
{% endif %}

{# Reboot with a one-minute delay so the state result can be reported. #}
mgr_autoinstall_start:
  cmd.run:
    - name: shutdown -r +1
    - require:
{% if loader_type == 'grub' %}
      - cmd: mgr_grub_boot_once
{% elif loader_type == 'elilo' %}
      - cmd: mgr_elilo_copy_config
{% else %}
      - cmd: mgr_generate_grubconf
{% endif %}

{% endif %}
 0707010000002A000081B400000000000000000000000163F87E30000000CA000000000000000000000000000000000000003C00000000susemanager-sls/salt/bootloader/elilo_uyuni_reinstall.templ   
{# elilo stanza appended to /etc/elilo.conf for the one-shot reinstall boot. #}
image = uyuni-reinstall-kernel
###Created for automated reinstallation
    label = {{ pillar.get('uyuni-reinstall-name') }}
    append = "{{ pillar.get('kopts') }}"
    initrd = uyuni-reinstall-initrd
  0707010000002B000081B400000000000000000000000163F87E30000000BC000000000000000000000000000000000000003C00000000susemanager-sls/salt/bootloader/grub1_uyuni_reinstall.templ   
{# grub1 menu.lst entry, armed for one boot via grubonce (autoinstall.sls). #}
###Created for automated reinstallation
title {{ pillar.get('uyuni-reinstall-name') }}
kernel /boot/uyuni-reinstall-kernel {{ pillar.get('kopts') }}
  initrd /boot/uyuni-reinstall-initrd
0707010000002C000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001F00000000susemanager-sls/salt/bootstrap    0707010000002D000081B400000000000000000000000163F87E30000001FB000000000000000000000000000000000000002E00000000susemanager-sls/salt/bootstrap/bootstrap.repo # SUSE Manager bootstrap repository
# Do not edit this file, changes will be overwritten
{# Rendered with bootstrap_repo_url from context (see bootstrap/init.sls). #}
{%- if grains['os_family'] == 'Debian' %}
deb [trusted=yes] {{bootstrap_repo_url}} bootstrap main
{%- else %}
[SUSE-Manager-Bootstrap]
name=SUSE-Manager-Bootstrap
type=rpm-md
baseurl={{bootstrap_repo_url}}
gpgcheck=0
enabled=1
{%- if grains['os_family'] == 'RedHat' and grains['osmajorrelease'] >= 8 %}
{# dnf option: exempt this repo's packages from module filtering (EL 8+). #}
module_hotfixes=1
{%- elif grains['os_family'] == 'Suse' %}
autorefresh=1
keeppackages=0
{%- endif %}
{%- endif %}
 0707010000002E000081B400000000000000000000000163F87E3000002259000000000000000000000000000000000000002800000000susemanager-sls/salt/bootstrap/init.sls   # Make sure no SUSE Manager server aliasing left over from ssh-push via tunnel
mgr_server_localhost_alias_absent:
  host.absent:
    - ip:
      - 127.0.0.1
    - names:
      - {{ salt['pillar.get']('mgr_server') }}

# disable all susemanager:* repos
{# Sets repos_disabled and emits disable_repo_* states for matching repos. #}
{% set repos_disabled = {'match_str': 'susemanager:', 'matching': true} %}
{%- include 'channels/disablelocalrepos.sls' %}

# SUSE OS Family
{# Derive os_base/osrelease used to build the bootstrap repo URL. #}
{%- if grains['os_family'] == 'Suse' %}
  {% set os_base = 'sle' %}
  {% set osrelease_major = grains['osrelease_info'][0] %}
  #exceptions to the family rule
  {%- if "opensuse" in grains['oscodename']|lower %}
    {% set os_base = 'opensuse' %}
  {%- endif %}
  {# Releases without a minor number (e.g. GA) report patch level 0. #}
  {%- if (grains['osrelease_info']| length) < 2 %}
    {% set osrelease_minor = 0 %}
  {%- else %}
    {% set osrelease_minor = grains['osrelease_info'][1] %}
  {%- endif %}
  #end of exceptions
  {% set osrelease = osrelease_major|string + '/' + osrelease_minor|string %}
{%- endif %}

# Debian OS Family
{# Debian OS family: derive os_base/osrelease for the bootstrap repo URL. #}
{%- if grains['os_family'] == 'Debian' %}
  ## This common part should cover most of distro e.g. Debian, Ubuntu
  {%- set os_base = grains['os_family']|lower %}
  {% set osrelease = grains['osrelease_info'][0] %}
  #exceptions to the family rule
  {# The haystack is lowercased, so the needle must be all-lowercase too;
     the previous needle 'astraLinuxce' (capital L) could never match. #}
  {%- if 'astralinuxce' in grains['osfullname']|lower %}
    {%- set os_base = 'astra' %}
    {# Astra repos are keyed by codename rather than version number. #}
    {% set osrelease = grains['oscodename'] %}
  {%- elif grains['os'] == 'Ubuntu' %}
    {%- set os_base = grains['os']|lower %}
    {# Ubuntu uses major/minor path components, e.g. 18/04. #}
    {% set osrelease = grains['osrelease_info'][0]|string + '/' + grains['osrelease_info'][1]|string %}
  {%- endif %}
  #end of exceptions
{%- endif %}


# RedHat OS Family
{%- if grains['os_family'] == 'RedHat' %}
  ## This common part should cover most of distro e.g. Centos
  {%- set os_base = grains['os']|lower %} 
  {% set osrelease = grains['osrelease_info'][0] %}
  #exception to the family rule
  {%- if 'redhat' in grains['osfullname']|lower  %}
    {%- set os_base = 'res' %}
  {%- elif 'sle' in grains['osfullname']|lower %}
    {%- set os_base = 'res' %}
  {%- elif 'rocky' in grains['osfullname']|lower %}
    {%- set os_base = 'rockylinux' %}
  {%- elif 'amazon' in grains['osfullname']|lower %}
    {%- set os_base = 'amzn' %}
  {%- elif 'alibaba' in grains['osfullname']|lower %}
    {%- set os_base = 'alibaba' %}
  {%- elif 'oracle' in grains['osfullname']|lower %}
    {%- set os_base = 'oracle' %}
  {%- endif %}
  #end of exceptions
{%- endif %}

{% set bootstrap_repo_url = 'https://' ~ salt['pillar.get']('mgr_server') ~ '/pub/repositories/' ~ os_base ~ '/' ~ osrelease ~ '/bootstrap/' %}

{# Probe the bootstrap repo at render time; status 901 is a transport
   error whose message is re-raised to fail the state render. #}
{%- if grains['os_family'] == 'RedHat' or  grains['os_family'] == 'Suse'%}
  {% set bootstrap_repo_request = salt['http.query'](bootstrap_repo_url + 'repodata/repomd.xml', status=True, verify_ssl=False) %}
  {%- if 'status' not in bootstrap_repo_request %}
    {{ raise('Missing request status: {}'.format(bootstrap_repo_request)) }}
  # if bootstrap does not work, try with RedHat and re-test
  {%- elif grains['os_family'] == 'RedHat' and not (0 < bootstrap_repo_request['status'] < 300) %}
    {# Derivative repo missing: fall back to the RES repository. #}
    {%- set os_base = 'res' %}
    {% set osrelease = grains['osrelease_info'][0] %}
    {% set bootstrap_repo_url = 'https://' ~ salt['pillar.get']('mgr_server') ~ '/pub/repositories/' ~ os_base ~ '/' ~ osrelease ~ '/bootstrap/' %}
    {% set bootstrap_repo_request = salt['http.query'](bootstrap_repo_url + 'repodata/repomd.xml', status=True, verify_ssl=False) %}
    {%- if 'status' not in bootstrap_repo_request %}
      {{ raise('Missing request status: {}'.format(bootstrap_repo_request)) }}
    {%- elif bootstrap_repo_request['status'] == 901 %}
      {{ raise(bootstrap_repo_request['error']) }}
    {%- endif %}
  {%- elif bootstrap_repo_request['status'] == 901 %}
    {{ raise(bootstrap_repo_request['error']) }}
  {%- endif %}
  {%- set bootstrap_repo_exists = (0 < bootstrap_repo_request['status'] < 300) %}
{%- elif grains['os_family'] == 'Debian' %}
  {%- set bootstrap_repo_exists = (0 < salt['http.query'](bootstrap_repo_url + 'dists/bootstrap/Release', status=True, verify_ssl=False).get('status', 0) < 300) %}
{%- endif %}


{# Write the bootstrap repo definition only when the repo actually exists
   (onlyif below renders the bootstrap_repo_exists boolean). #}
bootstrap_repo:
  file.managed:
{%- if grains['os_family'] == 'Suse' %}
    - name: /etc/zypp/repos.d/susemanager:bootstrap.repo
{%- elif grains['os_family'] == 'RedHat' %}
    - name: /etc/yum.repos.d/susemanager:bootstrap.repo
{%- elif grains['os_family'] == 'Debian' %}
    - name: /etc/apt/sources.list.d/susemanager_bootstrap.list
{%- endif %}
    - source:
      - salt://bootstrap/bootstrap.repo
    - template: jinja
    - context:
      bootstrap_repo_url: {{bootstrap_repo_url}}
    - mode: 644
    - require:
      - host: mgr_server_localhost_alias_absent
{%- if repos_disabled.count > 0 %}
      - mgrcompat: disable_repo_*
{%- endif %}
    - onlyif:
      - ([ {{ bootstrap_repo_exists }} = "True" ])

{% include 'channels/gpg-keys.sls' %}

{# Choose classic salt-minion or the venv bundle; the marker file
   venv-enabled-<arch>.txt in the repo signals bundle availability. #}
{%- set salt_minion_name = 'salt-minion' %}
{%- set salt_config_dir = '/etc/salt' %}
{% set venv_available_request = salt['http.query'](bootstrap_repo_url + 'venv-enabled-' + grains['osarch'] + '.txt', status=True, verify_ssl=False) %}
{# Prefer venv-salt-minion if available and not disabled #}
{%- set use_venv_salt = salt['pillar.get']('mgr_force_venv_salt_minion') or (0 < venv_available_request.get('status', 404) < 300) and not salt['pillar.get']('mgr_avoid_venv_salt_minion') %}
{%- if use_venv_salt %}
{%- set salt_minion_name = 'venv-salt-minion' %}
{%- set salt_config_dir = '/etc/venv-salt-minion' %}
{%- endif -%}

salt-minion-package:
  pkg.installed:
    - name: {{ salt_minion_name }}
    - install_recommends: False
    - require:
      - file: bootstrap_repo

{# We must install "python3-contextvars" on DEB based distros, running Salt 3004, with Python version < 3.7, like Ubuntu 18.04 #}
{# We cannot make this package a hard dependency for Salt DEB package because this is only needed in Ubuntu 18.04 #}
{# DEB based distros with Python version >= 3.7 does not need this package - package is not existing in such cases #}
{# Since we only maintain a single DEB package for all DEB based distros, we need to explicitly install the package here #}
{%- set contextvars_needed = False %}
{%- if salt_minion_name == 'salt-minion' and grains['os_family'] == 'Debian' and grains['pythonversion'][0] >= 3 and grains['pythonversion'][1] < 7 %}
  {%- if not (grains['os'] == 'Ubuntu' and grains['osrelease_info'][0] == 16) and not (grains['os'] == 'Debian' and grains['osrelease_info'][0] == 9) %}
    {%- set contextvars_needed = True %}
  {%- endif %}
{%- endif %}

{% if contextvars_needed %}
salt-install-contextvars:
  pkg.installed:
    - name: python3-contextvars
    - install_recommends: False
    - require:
      - file: bootstrap_repo
      - pkg: salt-minion-package
{% endif %}

{# Minion configuration, identity and (optionally) pre-seeded keys,
   followed by the service itself. Paths depend on the chosen minion
   flavour via salt_config_dir / salt_minion_name. #}
{{ salt_config_dir }}/minion.d/susemanager.conf:
  file.managed:
    - source:
      - salt://bootstrap/susemanager.conf
    - template: jinja
    - mode: 644
    - require:
      - pkg: salt-minion-package

{{ salt_config_dir }}/minion_id:
  file.managed:
    - contents_pillar: minion_id
    - require:
      - pkg: salt-minion-package

{% include 'bootstrap/remove_traditional_stack.sls' %}

{# Keep the package manager and hardware-inventory tooling current. #}
mgr_update_basic_pkgs:
  pkg.latest:
    - pkgs:
      - openssl
{%- if grains['os_family'] == 'Suse' and grains['osrelease'] in ['11.3', '11.4'] and grains['cpuarch'] in ['i586', 'x86_64'] %}
      - pmtools
{%- elif grains['cpuarch'] in ['aarch64', 'x86_64'] %}
      - dmidecode
{%- endif %}
{%- if grains['os_family'] == 'Suse' %}
      - zypper
{%- elif grains['os_family'] == 'RedHat' %}
      - yum
{%- endif %}

# Manage minion key files in case they are provided in the pillar
{% if pillar['minion_pub'] is defined and pillar['minion_pem'] is defined %}
{{ salt_config_dir }}/pki/minion/minion.pub:
  file.managed:
    - contents_pillar: minion_pub
    - mode: 644
    - makedirs: True
    - require:
      - pkg: salt-minion-package

{{ salt_config_dir }}/pki/minion/minion.pem:
  file.managed:
    - contents_pillar: minion_pem
    - mode: 400
    - makedirs: True
    - require:
      - pkg: salt-minion-package

{{ salt_minion_name }}:
  service.running:
    - name: {{ salt_minion_name }}
    - enable: True
    - require:
      - pkg: salt-minion-package
      - host: mgr_server_localhost_alias_absent
    - watch:
      - file: {{ salt_config_dir }}/minion_id
      - file: {{ salt_config_dir }}/pki/minion/minion.pem
      - file: {{ salt_config_dir }}/pki/minion/minion.pub
      - file: {{ salt_config_dir }}/minion.d/susemanager.conf
{% else %}
{{ salt_minion_name }}:
  service.running:
    - enable: True
    - require:
      - pkg: salt-minion-package
      - host: mgr_server_localhost_alias_absent
    - watch:
      - file: {{ salt_config_dir }}/minion_id
      - file: {{ salt_config_dir }}/minion.d/susemanager.conf
{% endif %}
   0707010000002F000081B400000000000000000000000163F87E30000007A8000000000000000000000000000000000000003C00000000susemanager-sls/salt/bootstrap/remove_traditional_stack.sls   # disable all spacewalk:* repos
{# Stop and remove the traditional (pre-Salt) client stack. #}
{% set repos_disabled = {'match_str': 'spacewalk:', 'matching': true} %}
{%- include 'channels/disablelocalrepos.sls' %}

include:
  - util.syncstates

disable_spacewalksd:
  service.dead:
    - name: rhnsd
    - enable: False

disable_spacewalk-update-status:
  service.dead:
    - name: spacewalk-update-status
    - enable: False

disable_osad:
  service.dead:
    - name: osad
    - enable: False

remove_traditional_stack_all:
  pkg.removed:
    - pkgs:
      - spacewalk-check
      - spacewalk-client-setup
      - osad
      - osa-common
      - mgr-osad
      - spacewalksd
      - mgr-daemon
      - rhnlib
      - rhnmd
{%- if grains['os_family'] == 'Suse' %}
      - zypp-plugin-spacewalk
{%- elif grains['os_family'] == 'RedHat' %}
      - yum-rhn-plugin
      - rhnsd
      - rhn-check
      - rhn-setup
      - rhn-client-tools
{%- elif grains['os_family'] == 'Debian' %}
      - apt-transport-spacewalk
{%- endif %}
{%- if repos_disabled.count > 0 %}
    - require:
      - mgrcompat: disable_repo*
{%- endif %}

{# Keep shared tools when this system is a proxy (see unless below). #}
remove_traditional_stack:
  pkg.removed:
    - pkgs:
      - spacewalk-client-tools
      - rhncfg
      - mgr-cfg
{%- if grains['os_family'] == 'Suse' %}
      - suseRegisterInfo
{%- endif %}
{%- if repos_disabled.count > 0 %}
    - require:
      - mgrcompat: disable_repo*
{%- endif %}
    - unless: rpm -q spacewalk-proxy-common || rpm -q spacewalk-common
  
# only removing apt-transport-spacewalk above
# causes apt-get update to 'freeze' if this
# file is still present and referencing a
# method not present anymore.
{%- if grains['os_family'] == 'Debian' %}
remove_spacewalk_sources:
  file.absent:
    - name: /etc/apt/sources.list.d/spacewalk.list
{%- endif %}

# Remove suseRegisterInfo in a separate yum transaction to avoid being called by
# the yum plugin.
{%- if grains['os_family'] == 'RedHat' %}
remove_suse_register_info_rh:
  pkg.removed:
    - name: suseRegisterInfo
{%- endif %}
07070100000030000081B400000000000000000000000163F87E3000000379000000000000000000000000000000000000002D00000000susemanager-sls/salt/bootstrap/set_proxy.sls  {%- set conf_file = '/etc/salt/minion.d/susemanager.conf' %}
{%- set salt_service = 'salt-minion' %}

{# Prefer venv-salt-minion if installed #}
{%- if salt['pkg.version']('venv-salt-minion') %}
{%- set conf_file = '/etc/venv-salt-minion/minion.d/susemanager.conf' %}
{%- set salt_service = 'venv-salt-minion' %}
{%- endif -%}

{%- set pattern = '^master:.*' %}

{# Only rewrite the master when the managed config actually defines one. #}
{% if salt['file.search'](conf_file, pattern) -%}

{{ conf_file }}:
  file.line:
    - mode: replace
    - match: "{{ pattern }}"
    - content: "master: {{ pillar['mgr_server'] }}"

{# Restart in the background with a delay so this job can still report. #}
restart:
  mgrcompat.module_run:
    - name: cmd.run_bg
    - cmd: "sleep 2; service {{ salt_service }} restart"
    - python_shell: true

{% else -%}

non_standard_conf:
  test.configurable_test_state:
    - changes: False
    - result: False
    - comment: "Can't change proxy. Salt master is not configured in {{ conf_file }}"

{% endif %}
   07070100000031000081B400000000000000000000000163F87E30000002BE000000000000000000000000000000000000003000000000susemanager-sls/salt/bootstrap/susemanager.conf   # This file was generated by SUSE Manager
master: {{ pillar['mgr_server'] }}
server_id_use_crc: adler32
enable_legacy_startup_events: False
enable_fqdns_grains: False
{# Expose the registration keys as static grains when provided. #}
{% if pillar['activation_key'] is defined or pillar['management_key'] is defined %}
grains:
  susemanager:
{%- if pillar['activation_key'] is defined %}
    activation_key: {{ pillar['activation_key'] }}
{%- endif %}
{%- if pillar['management_key'] is defined %}
    management_key: {{ pillar['management_key'] }}
{%- endif %}
{% endif %}
start_event_grains:
  - machine_id
  - saltboot_initrd
  - susemanager

# Define SALT_RUNNING env variable for pkg modules
system-environment:
  modules:
    pkg:
      _:
        SALT_RUNNING: 1
  07070100000032000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001B00000000susemanager-sls/salt/certs    07070100000033000081B400000000000000000000000163F87E3000000197000000000000000000000000000000000000002600000000susemanager-sls/salt/certs/debian.sls mgr_download_mgr_cert:
  # Install the SUSE Manager CA cert into the Debian local trust store
  # and regenerate the consolidated CA bundle when it changes.
  file.managed:
    - name: /usr/local/share/ca-certificates/susemanager/RHN-ORG-TRUSTED-SSL-CERT.crt
    - makedirs: True
    - source:
      - salt://certs/RHN-ORG-TRUSTED-SSL-CERT

mgr_update_ca_certs:
  cmd.run:
    - name: /usr/sbin/update-ca-certificates
    - runas: root
    - onchanges:
      - file: /usr/local/share/ca-certificates/susemanager/RHN-ORG-TRUSTED-SSL-CERT.crt
 07070100000034000081B400000000000000000000000163F87E30000000A5000000000000000000000000000000000000002400000000susemanager-sls/salt/certs/init.sls   {% macro includesls(os_family) -%}
{% include 'certs/{0}.sls'.format(os_family) -%}
{%- endmacro %}
{# Dispatch to the per-OS-family cert state, e.g. certs/suse.sls. #}
{% set sls = includesls(grains['os_family']|lower) -%}
{{ sls }}
   07070100000035000081B400000000000000000000000163F87E3000000284000000000000000000000000000000000000002600000000susemanager-sls/salt/certs/redhat.sls {%- if grains['osrelease']|int == 6 %}
{# EL6 only: the shared ca-trust store must be enabled first. #}
enable_ca_store:
  cmd.run:
    - name: /usr/bin/update-ca-trust enable
    - runas: root
    - unless: "/usr/bin/update-ca-trust check | grep \"PEM/JAVA Status: ENABLED\""
{%- endif %}
/etc/pki/ca-trust/source/anchors/RHN-ORG-TRUSTED-SSL-CERT:
  file.managed:
    - source:
      - salt://certs/RHN-ORG-TRUSTED-SSL-CERT
{%- if grains['osrelease']|int == 6 %}
    - require:
      - cmd: enable_ca_store
{%- endif %}
{# Rebuild the trust store whenever the anchor changes. #}
update-ca-certificates:
  cmd.run:
    - name: /usr/bin/update-ca-trust extract
    - runas: root
    - onchanges:
      - file: /etc/pki/ca-trust/source/anchors/RHN-ORG-TRUSTED-SSL-CERT

07070100000036000081FD00000000000000000000000163F87E300000032F000000000000000000000000000000000000002400000000susemanager-sls/salt/certs/suse.sls   {%- if grains['osrelease']|int == 11 %}
/etc/ssl/certs/RHN-ORG-TRUSTED-SSL-CERT.pem:
{%- else %}
/etc/pki/trust/anchors/RHN-ORG-TRUSTED-SSL-CERT:
{%- endif %}
  file.managed:
    - source:
      - salt://certs/RHN-ORG-TRUSTED-SSL-CERT

{%- if grains['osrelease']|int == 11 %}
{# SLE 11: split a multi-cert bundle into single PEMs, then rehash. #}
salt://certs/update-multi-cert.sh:
  cmd.wait_script:
    - runas: root
    - watch:
        - file: /etc/ssl/certs/RHN-ORG-TRUSTED-SSL-CERT.pem

c_rehash:
  cmd.run:
    - name: /usr/bin/c_rehash
    - runas: root
    - onchanges:
      - file: /etc/ssl/certs/*
{%- else %}

{# Newer SUSE releases: rebuild the trust store from the anchor. #}
update-ca-certificates:
  cmd.run:
    - name: /usr/sbin/update-ca-certificates
    - runas: root
    - onchanges:
      - file: /etc/pki/trust/anchors/RHN-ORG-TRUSTED-SSL-CERT
    - retry:
        attempts: 5
        interval: 5
        until: True
{%- endif %}
 07070100000037000081B400000000000000000000000163F87E300000018B000000000000000000000000000000000000003000000000susemanager-sls/salt/certs/update-multi-cert.sh   CERT_DIR=/etc/ssl/certs
# Split a multi-certificate RHN-ORG-TRUSTED-SSL-CERT.pem into individual
# PEM files so each certificate can be hashed separately (SLE 11 store).
CERT_FILE=RHN-ORG-TRUSTED-SSL-CERT
TRUST_DIR=/etc/ssl/certs

# Drop split files left over from a previous run.
rm -f $TRUST_DIR/${CERT_FILE}-*.pem

bundle="$CERT_DIR/${CERT_FILE}.pem"
if [ -f "$bundle" ]; then
    # Only split when the file really contains more than one certificate.
    cert_count=$(grep -c -- "-----BEGIN CERTIFICATE-----" "$bundle")
    if [ "$cert_count" -gt 1 ]; then
        csplit -b "%02d.pem" -f $TRUST_DIR/${CERT_FILE}- "$bundle" '/-----BEGIN CERTIFICATE-----/' '{*}'
    fi
fi

 07070100000038000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001E00000000susemanager-sls/salt/channels 07070100000039000081B400000000000000000000000163F87E3000000288000000000000000000000000000000000000002B00000000susemanager-sls/salt/channels/aptauth.conf    # susemanager.conf managed by SUSE Manager
# Do not edit this file, changes will be overwritten
#
{% for chan, args in pillar.get(pillar.get('_mgr_channels_items_name', 'channels'), {})|dictsort|reverse %}
{%- set protocol = salt['pillar.get']('pkg_download_point_protocol', 'https')%}
{%- set hostname = salt['pillar.get']('pkg_download_point_host', args['host'])%}
{%- set port = salt['pillar.get']('pkg_download_point_port', args.get('port', 443))%}
machine {{hostname}}:{{port}}/rhn/manager/download/dists/{{ chan }} login {{ args['token'] }}
machine {{hostname}}:{{port}}/rhn/manager/download/{{ chan }} login {{ args['token'] }}
{% endfor %}
0707010000003A000081B400000000000000000000000163F87E3000000848000000000000000000000000000000000000002C00000000susemanager-sls/salt/channels/channels.repo   # Channels managed by SUSE Manager
# Do not edit this file, changes will be overwritten
#
{% for chan, args in pillar.get(pillar.get('_mgr_channels_items_name', 'channels'), {}).items() %}
{%- set protocol = salt['pillar.get']('pkg_download_point_protocol', 'https')%}
{%- set hostname = salt['pillar.get']('pkg_download_point_host', args['host'])%}
{%- set port = salt['pillar.get']('pkg_download_point_port', args.get('port', 443))%}
{%- if grains['os'] == 'Debian' or grains['os'] == 'Ubuntu' %}
{%- set apt_version = salt['pkg.version']("apt") %}
{%- set apt_support_acd = grains['os_family'] == 'Debian' and apt_version and salt['pkg.version_cmp'](apt_version, "1.6.10") > 0 %}

{%- if apt_support_acd %}
deb {{ '[trusted=yes]' if not pillar.get('mgr_metadata_signing_enabled', false) else '[signed-by=/usr/share/keyrings/mgr-archive-keyring.gpg]' }} {{protocol}}://{{hostname}}:{{port}}/rhn/manager/download {{ chan }} main
{%- else %}
deb {{ '[trusted=yes]' if not pillar.get('mgr_metadata_signing_enabled', false) else '[signed-by=/usr/share/keyrings/mgr-archive-keyring.gpg]' }} {{protocol}}://{{ args['token'] }}@{{hostname}}:{{port}}/rhn/manager/download {{ chan }} main
{%- endif %}
{%- else %}
[{{ args['alias'] }}]
name={{ args['name'] }}
enabled={{ args['enabled'] }}
{%- if grains['os_family'] == 'RedHat' %}
baseurl={{protocol}}://{{hostname}}:{{port}}/rhn/manager/download/{{ chan }}
susemanager_token={{ args['token'] }}
gpgcheck={{ 1 if args['gpgcheck'] == "1" or args['pkg_gpgcheck'] != "0" else 0 }}
repo_gpgcheck={{ args['gpgcheck'] }}
{%- if salt['pillar.get']('mgr_metadata_signing_enabled', false) %}
gpgkey=https://{{ args['host'] }}/pub/mgr-gpg-pub.key
{%- endif %}
{%- if grains['osmajorrelease'] >= 8 and args['cloned_nonmodular'] %}
module_hotfixes=1
{%- endif %}
{%- else %}
autorefresh={{ args['autorefresh'] }}
baseurl={{protocol}}://{{hostname}}:{{port}}/rhn/manager/download/{{ chan }}?{{ args['token'] }}
gpgcheck={{ args['gpgcheck'] }}
repo_gpgcheck={{ args['repo_gpgcheck'] }}
pkg_gpgcheck={{ args['pkg_gpgcheck'] }}
{%- endif %}
type={{ args['type'] }}
{%- endif %}

{% endfor %}
0707010000003B000081B400000000000000000000000163F87E30000005AC000000000000000000000000000000000000003400000000susemanager-sls/salt/channels/disablelocalrepos.sls   # Disable all local repos matching or not matching the 'match_str'
# Default arguments: everything except *susemanager:*
{% if not repos_disabled is defined %}
{% set repos_disabled = {'match_str': 'susemanager:', 'matching': false} %}
{% endif %}
{% do repos_disabled.update({'count': 0}) %}

{% if salt['config.get']('disable_local_repos', True) %}
{% set repos = salt['pkg.list_repos']() %}
{% for alias, data in repos.items() %}
{% if grains['os_family'] == 'Debian' %}
{% for entry in data %}
{% if (repos_disabled.match_str in entry['file'])|string == repos_disabled.matching|string and entry.get('enabled', True) %} 
disable_repo_{{ repos_disabled.count }}:
  mgrcompat.module_run:
    - name: pkg.mod_repo
    - repo: {{ "'" ~ entry.line ~ "'" }}
    - kwargs:
        disabled: True
{% do repos_disabled.update({'count': repos_disabled.count + 1}) %}
{% endif %}
{% endfor %}
{% else %}
{% if (repos_disabled.match_str in alias)|string == repos_disabled.matching|string and data.get('enabled', True) in [True, '1'] %}
disable_repo_{{ alias }}:
  mgrcompat.module_run:
    - name: pkg.mod_repo
    - repo: {{ alias }}
    - kwargs:
        enabled: False
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
{% do repos_disabled.update({'count': repos_disabled.count + 1}) %}
{% endif %}
{% endif %}
{% endfor %}
{% endif %}
0707010000003C000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000003500000000susemanager-sls/salt/channels/dnf-susemanager-plugin  0707010000003D000081B400000000000000000000000163F87E3000000011000000000000000000000000000000000000004C00000000susemanager-sls/salt/channels/dnf-susemanager-plugin/susemanagerplugin.conf   [main]
enabled=1
   0707010000003E000081B400000000000000000000000163F87E30000001E0000000000000000000000000000000000000004A00000000susemanager-sls/salt/channels/dnf-susemanager-plugin/susemanagerplugin.py import dnf

class Susemanager(dnf.Plugin):
    """DNF plugin adding the SUSE Manager auth token as an HTTP header.

    For every repository whose id matches ``susemanager:*`` the
    ``susemanager_token`` option from the repo configuration is sent as an
    ``X-Mgr-Auth`` header so the server can authorize package downloads.
    """

    name = 'susemanager'

    def __init__(self, base, cli):
        super(Susemanager, self).__init__(base, cli)

    def config(self):
        for repo in self.base.repos.get_matching("susemanager:*"):
            try:
                susemanager_token = repo.cfg.getValue(section=repo.id, key="susemanager_token")
                repo.set_http_headers(["X-Mgr-Auth: %s" % susemanager_token])
            # Was a bare 'except:', which also swallows SystemExit and
            # KeyboardInterrupt; narrowed to Exception.  Repos without a
            # susemanager_token option are skipped on purpose — a missing
            # token must never break dnf itself.
            except Exception:
                pass
0707010000003F000081B400000000000000000000000163F87E30000008A7000000000000000000000000000000000000002B00000000susemanager-sls/salt/channels/gpg-keys.sls    {%- set mgr_server = salt['pillar.get']('mgr_server')%}
{%- set port = salt['pillar.get']('mgr_server_https_port', 443)%}

{%- if salt['pillar.get']('mgr_metadata_signing_enabled', false) %}
{%- if grains['os_family'] == 'Debian' %}
mgr_debian_repo_keyring:
  file.managed:
    - name: /usr/share/keyrings/mgr-archive-keyring.gpg
    - source: salt://gpg/mgr-keyring.gpg
    - mode: 644
{% else %}
mgr_trust_customer_gpg_key:
  cmd.run:
    - name: rpm --import https://{{mgr_server}}:{{port}}/pub/mgr-gpg-pub.key
    - runas: root
{%- endif %}
{%- endif %}

{%- if grains['os_family'] == 'RedHat' %}
trust_res_gpg_key:
  cmd.run:
    - name: rpm --import https://{{mgr_server}}:{{port}}/pub/{{ salt['pillar.get']('gpgkeys:res:file') }}
    - unless: rpm -q {{ salt['pillar.get']('gpgkeys:res:name') }}
    - runas: root

trust_suse_manager_tools_rhel_gpg_key:
  cmd.run:
{%- if grains['osmajorrelease']|int == 6 %}
    - name: rpm --import https://{{mgr_server}}:{{port}}/pub/{{ salt['pillar.get']('gpgkeys:res6tools:file') }}
    - unless: rpm -q {{ salt['pillar.get']('gpgkeys:res6tools:name') }}
{%- elif grains['osmajorrelease']|int == 7 %}
    - name: rpm --import https://{{mgr_server}}:{{port}}/pub/{{ salt['pillar.get']('gpgkeys:res7tools:file') }}
    - unless: rpm -q {{ salt['pillar.get']('gpgkeys:res7tools:name') }}
{%- elif grains['osmajorrelease']|int == 8 %}
    - name: rpm --import https://{{mgr_server}}:{{port}}/pub/{{ salt['pillar.get']('gpgkeys:res8tools:file') }}
    - unless: rpm -q {{ salt['pillar.get']('gpgkeys:res8tools:name') }}
{%- elif grains['osmajorrelease']|int == 2 and grains['os'] == 'Amazon' %}
    - name: rpm --import https://{{ salt['pillar.get']('mgr_server') }}/pub/{{ salt['pillar.get']('gpgkeys:res7tools:file') }}
    - unless: rpm -q {{ salt['pillar.get']('gpgkeys:res7tools:name') }}
{% else %}
    - name: /usr/bin/true
{%- endif %}
    - runas: root

{%- elif grains['os_family'] == 'Debian' %}
install_gnupg_debian:
  pkg.latest:
    - pkgs:
      - gnupg

trust_suse_manager_tools_deb_gpg_key:
  mgrcompat.module_run:
    - name: pkg.add_repo_key
    - path: https://{{mgr_server}}:{{port}}/pub/{{ salt['pillar.get']('gpgkeys:ubuntutools:file') }}
{%- endif %}
 07070100000040000081B400000000000000000000000163F87E30000010BC000000000000000000000000000000000000002700000000susemanager-sls/salt/channels/init.sls    include:
  - util.syncstates

{%- if grains['os_family'] == 'RedHat' %}

{%- set yum_version = salt['pkg.version']("yum") %}
{%- set is_yum = yum_version and salt['pkg.version_cmp'](yum_version, "4") < 0 %}
{%- set is_dnf = salt['pkg.version']("dnf") %}

{%- if is_dnf %}
{%- set dnf_plugins = salt['cmd.run']("find /usr/lib -type d -name dnf-plugins -printf '%T@ %p\n' | sort -nr | cut -d ' ' -s -f 2- | head -n 1", python_shell=True) %}
{%- if dnf_plugins %}
mgrchannels_susemanagerplugin_dnf:
  file.managed:
    - name: {{ dnf_plugins }}/susemanagerplugin.py
    - source:
      - salt://channels/dnf-susemanager-plugin/susemanagerplugin.py
    - user: root
    - group: root
    - mode: 644

mgrchannels_susemanagerplugin_conf_dnf:
  file.managed:
    - name: /etc/dnf/plugins/susemanagerplugin.conf
    - source:
      - salt://channels/dnf-susemanager-plugin/susemanagerplugin.conf
    - user: root
    - group: root
    - mode: 644

mgrchannels_enable_dnf_plugins:
  file.replace:
    - name: /etc/dnf/dnf.conf
    - pattern: plugins=.*
    - repl: plugins=1
{#- default is '1' when the option is not specified #}
    - onlyif: grep -e 'plugins=0' -e 'plugins=False' -e 'plugins=no' /etc/dnf/dnf.conf
{%- endif %}
{%- endif %}

{%- if is_yum %}
mgrchannels_susemanagerplugin_yum:
  file.managed:
    - name: /usr/share/yum-plugins/susemanagerplugin.py
    - source:
      - salt://channels/yum-susemanager-plugin/susemanagerplugin.py
    - user: root
    - group: root
    - mode: 644

mgrchannels_susemanagerplugin_conf_yum:
  file.managed:
    - name: /etc/yum/pluginconf.d/susemanagerplugin.conf
    - source:
      - salt://channels/yum-susemanager-plugin/susemanagerplugin.conf
    - user: root
    - group: root
    - mode: 644

mgrchannels_enable_yum_plugins:
  file.replace:
    - name: /etc/yum.conf
    - pattern: plugins=.*
    - repl: plugins=1
    - onlyif: grep plugins=0 /etc/yum.conf

{%- endif %}
{%- endif %}

mgrchannels_repo:
  file.managed:
{%- if grains['os_family'] == 'Suse' %}
    - name: "/etc/zypp/repos.d/susemanager:channels.repo"
{%- elif grains['os_family'] == 'RedHat' %}
    - name: "/etc/yum.repos.d/susemanager:channels.repo"
{%- elif grains['os_family'] == 'Debian' %}
    - name: "/etc/apt/sources.list.d/susemanager:channels.list"
{%- endif %}
    - source:
      - salt://channels/channels.repo
    - template: jinja
    - user: root
    - group: root
    - mode: 644
{%- if grains['os_family'] == 'RedHat' %}
    - require:
{%- if is_dnf %}
       - file: mgrchannels_susemanagerplugin_dnf
       - file: mgrchannels_susemanagerplugin_conf_dnf
{%- endif %}
{%- if is_yum %}
       - file: mgrchannels_susemanagerplugin_yum
       - file: mgrchannels_susemanagerplugin_conf_yum
{%- endif %}
{%- endif %}

{%- set apt_version = salt['pkg.version']("apt") %}
{%- set apt_support_acd = grains['os_family'] == 'Debian' and apt_version and salt['pkg.version_cmp'](apt_version, "1.6.10") > 0 %}

{%- if apt_support_acd %}
aptauth_conf:
  file.managed:
    - name: "/etc/apt/auth.conf.d/susemanager.conf"
    - source:
      - salt://channels/aptauth.conf
    - template: jinja
    - user: _apt
    - group: root
    - mode: 600
{%- endif %}

{%- if grains['os_family'] == 'RedHat' %}
{%- if is_dnf %}
mgrchannels_dnf_clean_all:
  cmd.run:
    - name: /usr/bin/dnf clean all
    - runas: root
    - onchanges:
       - file: "/etc/yum.repos.d/susemanager:channels.repo"
    -  unless: "/usr/bin/dnf repolist | grep \"repolist: 0$\""
{%- endif %}
{%- if is_yum %}
mgrchannels_yum_clean_all:
  cmd.run:
    - name: /usr/bin/yum clean all
    - runas: root
    - onchanges: 
       - file: "/etc/yum.repos.d/susemanager:channels.repo"
    -  unless: "/usr/bin/yum repolist | grep \"repolist: 0$\""
{%- endif %}
{%- endif %}

{%- if not salt['pillar.get']('susemanager:distupgrade:dryrun', False) %}
{%- if grains['os_family'] == 'Suse' and grains['osmajorrelease']|int > 11 and not grains['oscodename'] == 'openSUSE Leap 15.3' %}
mgrchannels_install_products:
  product.all_installed:
    - require:
      - file: mgrchannels_*
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
{%- endif %}

{% include 'channels/gpg-keys.sls' %}
{%- endif %}
07070100000041000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000003500000000susemanager-sls/salt/channels/yum-susemanager-plugin  07070100000042000081B400000000000000000000000163F87E3000000011000000000000000000000000000000000000004C00000000susemanager-sls/salt/channels/yum-susemanager-plugin/susemanagerplugin.conf   [main]
enabled=1
   07070100000043000081B400000000000000000000000163F87E30000001B2000000000000000000000000000000000000004A00000000susemanager-sls/salt/channels/yum-susemanager-plugin/susemanagerplugin.py from yum.plugins import TYPE_CORE
from yum import config

requires_api_version = '2.5'
plugin_type = TYPE_CORE


def config_hook(conduit):
    # Register the custom 'susemanager_token' repo config option with yum so
    # it is parsed from *.repo files instead of being rejected as unknown.
    config.RepoConf.susemanager_token = config.Option()


def init_hook(conduit):
    """Attach the SUSE Manager auth token to every enabled repository.

    Repositories carrying a non-empty 'susemanager_token' option get an
    'X-Mgr-Auth' HTTP header so the server can authorize downloads.
    """
    enabled_repos = conduit.getRepos().listEnabled()
    for enabled_repo in enabled_repos:
        token = getattr(enabled_repo, 'susemanager_token', None)
        if not token:
            continue
        enabled_repo.http_headers['X-Mgr-Auth'] = token
  07070100000044000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000002400000000susemanager-sls/salt/cleanup_minion   07070100000045000081B400000000000000000000000163F87E30000002F0000000000000000000000000000000000000002D00000000susemanager-sls/salt/cleanup_minion/init.sls  {%- if grains['os_family'] == 'RedHat' %}
mgrchannels_repo_clean_all:
  file.absent:
    - name: /etc/yum.repos.d/susemanager:channels.repo
{%- endif %}
{%- if grains['os_family'] == 'Suse' %}
mgrchannels_repo_clean_all:
  file.absent:
    - name: /etc/zypp/repos.d/susemanager:channels.repo
{%- endif %}
{%- if grains['os_family'] == 'Debian' %}
mgrchannels_repo_clean_channels:
  file.absent:
    - name: /etc/apt/sources.list.d/susemanager:channels.list
mgrchannels_repo_clean_auth:
  file.absent:
    - name: /etc/apt/auth.conf.d/susemanager.conf

mgrchannels_repo_clean_keyring:
  file.absent:
    - name: /usr/share/keyrings/mgr-archive-keyring.gpg
{%- endif %}

mgr_mark_no_longer_managed:
  file.absent:
    - name: /etc/sysconfig/rhn/systemid
07070100000046000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000002800000000susemanager-sls/salt/cleanup_ssh_minion   07070100000047000081B400000000000000000000000163F87E300000059F000000000000000000000000000000000000003100000000susemanager-sls/salt/cleanup_ssh_minion/init.sls  include:
    - cleanup_minion

{%- set mgr_sudo_user = salt['pillar.get']('mgr_sudo_user') or 'root' %}
{%- set home = salt['user.info'](mgr_sudo_user)['home'] %}

{% if salt['pillar.get']('contact_method') == 'ssh-push-tunnel' %}
# remove server to localhost aliasing from /etc/hosts
mgr_remove_mgr_server_localhost_alias:
  host.absent:
    - ip:
      - 127.0.0.1
    - names:
      - {{ salt['pillar.get']('mgr_server') }}
{%- endif %}

# remove server ssh authorization
mgr_remove_mgr_ssh_identity:
  ssh_auth.absent:
    - user: {{ mgr_sudo_user }}
    - source: salt://salt_ssh/mgr_ssh_id.pub

{%- if salt['pillar.get']('proxy_pub_key') and salt['pillar.get']('contact_method') == 'ssh-push-tunnel' %}
# remove proxy ssh authorization (if any)
mgr_remove_proxy_ssh_identity:
  ssh_auth.absent:
    - user: {{ mgr_sudo_user }}
    - source: salt://salt_ssh/{{ salt['pillar.get']('proxy_pub_key') }}
{%- endif %}

# remove own key authorization
mgr_no_own_key_authorized:
  ssh_auth.absent:
    - user: {{ mgr_sudo_user }}
    - source: {{ home }}/.ssh/mgr_own_id.pub

# remove own keys
mgr_remove_own_ssh_pub_key:
  file.absent:
    - name: {{ home }}/.ssh/mgr_own_id.pub
    - require:
      - ssh_auth: mgr_no_own_key_authorized

mgr_remove_own_ssh_key:
  file.absent:
    - name: {{ home }}/.ssh/mgr_own_id

# Remove logrotate configuration
mgr_remove_logrotate_configuration:
  file.absent:
    - name: /etc/logrotate.d/salt-ssh
 07070100000048000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001E00000000susemanager-sls/salt/clusters 07070100000049000081B400000000000000000000000163F87E3000000518000000000000000000000000000000000000002A00000000susemanager-sls/salt/clusters/addnode.sls {%- if pillar['params'].get('ssh_auth_sock', False) %}
mgr_ssh_agent_socket_clusters_addnode:
  environ.setenv:
    - name: SSH_AUTH_SOCK
    - value: {{ pillar['params'].get('ssh_auth_sock') }}
{%- endif %}

{%- set params = pillar['params'] %}
{%- for node in params.nodes %}
{%- set addparams = {'node_name': node.node_name, 'target': node.target, 'role': params.role, 'user': params.user, 'skuba_cluster_path': params.skuba_cluster_path } %}
mgr_cluster_add_node_{{ node.node_name }}:
  mgrcompat.module_run:
    - name: mgrclusters.add_node
    - provider_module: {{ pillar['cluster_type'] }}
    - params: {{ addparams }}
    - require:
   {%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_modules
   {%- else %}
      - mgrcompat: sync_modules
   {%- endif %}
   {%- if pillar['params'].get('ssh_auth_sock', False) %}
      - environ: mgr_ssh_agent_socket_clusters_addnode
   {%- endif %}
{%- for hook in pillar['state_hooks'].get('join', {}).get('before', []) %}
      - sls: {{ hook }}
{%- endfor %}
{%- endfor %}

include:
  - util.syncmodules
{%- for hook in pillar['state_hooks'].get('join', {}).get('before', []) %}
  - {{ hook }}
{%- endfor %}
{%- for hook in pillar['state_hooks'].get('join', {}).get('after', [])%}
  - {{ hook }}
{%- endfor %}
0707010000004A000081B400000000000000000000000163F87E30000002C6000000000000000000000000000000000000003000000000susemanager-sls/salt/clusters/createcluster.sls   {%- if pillar.get('ssh_auth_sock', False) %}
mgr_ssh_agent_socket_clusters_createcluster:
  environ.setenv:
    - name: SSH_AUTH_SOCK
    - value: {{ pillar['ssh_auth_sock'] }}
{%- endif %}

mgr_cluster_create_cluster:
  mgrcompat.module_run:
    - name: mgrclusters.create_cluster
    - provider_module: {{ pillar['cluster_type'] }}
    - params: {{ pillar['params'] }}
    - require:
   {%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_modules
   {%- else %}
      - mgrcompat: sync_modules
   {%- endif %}
   {%- if pillar.get('ssh_auth_sock', False) %}
      - environ: mgr_ssh_agent_socket_clusters_createcluster
   {%- endif %}

include:
  - util.syncmodules
  0707010000004B000081B400000000000000000000000163F87E30000002D8000000000000000000000000000000000000002C00000000susemanager-sls/salt/clusters/listnodes.sls   {%- if pillar['params'].get('ssh_auth_sock', False) %}
mgr_ssh_agent_socket_clusters_listnodes:
  environ.setenv:
    - name: SSH_AUTH_SOCK
    - value: {{ pillar['params'].get('ssh_auth_sock') }}
{%- endif %}

mgr_cluster_list_nodes:
  mgrcompat.module_run:
    - name: mgrclusters.list_nodes
    - provider_module: {{ pillar['cluster_type'] }}
    - params: {{ pillar['params'] }}
    - require:
   {%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_modules
   {%- else %}
      - mgrcompat: sync_modules
   {%- endif %}
   {%- if pillar['params'].get('ssh_auth_sock', False) %}
      - environ: mgr_ssh_agent_socket_clusters_listnodes
   {%- endif %}

include:
  - util.syncmodules
0707010000004C000081B400000000000000000000000163F87E3000000510000000000000000000000000000000000000002D00000000susemanager-sls/salt/clusters/removenode.sls  {%- if pillar['params'].get('ssh_auth_sock', False) %}
mgr_ssh_agent_socket_clusters_removenode:
  environ.setenv:
    - name: SSH_AUTH_SOCK
    - value: {{ pillar['params'].get('ssh_auth_sock') }}
{%- endif %}

{%- set params = pillar['params'] %}
{%- for node in params.nodes %}
{#- Parameters forwarded to mgrclusters.remove_node for this node #}
{%- set removeparams = {'node_name': node.node_name, 'skuba_cluster_path': params.skuba_cluster_path, 'drain_timeout': params.drain_timeout } %}
mgr_cluster_remove_node_{{ node.node_name }}:
  mgrcompat.module_run:
    - name: mgrclusters.remove_node
    - provider_module: {{ pillar['cluster_type'] }}
    - params: {{ removeparams }}
    - require:
   {%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_modules
   {%- else %}
      - mgrcompat: sync_modules
   {%- endif %}
   {#- Use the same pillar lookup as the guard that renders the
       mgr_ssh_agent_socket_clusters_removenode state at the top of this
       file; the previous 'pillar.get' lookup could require a state that
       was never rendered. #}
   {%- if pillar['params'].get('ssh_auth_sock', False) %}
      - environ: mgr_ssh_agent_socket_clusters_removenode
   {%- endif %}
{%- for hook in pillar['state_hooks'].get('remove', {}).get('before', []) %}
      - sls: {{ hook }}
{%- endfor %}
{%- endfor %}

include:
  - util.syncmodules
{%- for hook in pillar['state_hooks'].get('remove', {}).get('before', []) %}
  - {{ hook }}
{%- endfor %}
{%- for hook in pillar['state_hooks'].get('remove', {}).get('after', []) %}
  - {{ hook }}
{%- endfor %}
0707010000004D000081B400000000000000000000000163F87E3000000437000000000000000000000000000000000000003100000000susemanager-sls/salt/clusters/upgradecluster.sls  {%- if pillar['params'].get('ssh_auth_sock', False) %}
mgr_ssh_agent_socket_upgradecluster:
  environ.setenv:
    - name: SSH_AUTH_SOCK
    - value: {{ pillar['params'].get('ssh_auth_sock') }}
{%- endif %}

{%- set params = pillar['params'] %}
mgr_cluster_upgrade_cluster:
  mgrcompat.module_run:
    - name: mgrclusters.upgrade_cluster
    - provider_module: {{ pillar['cluster_type'] }}
    - params: {{ params }}
    - require:
   {%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_modules
   {%- else %}
      - mgrcompat: sync_modules
   {%- endif %}
   {#- Use the same pillar lookup as the guard that renders the
       mgr_ssh_agent_socket_upgradecluster state at the top of this file;
       the previous 'pillar.get' lookup could require a state that was
       never rendered. #}
   {%- if pillar['params'].get('ssh_auth_sock', False) %}
      - environ: mgr_ssh_agent_socket_upgradecluster
   {%- endif %}
{#- The include section below only pulls in the 'upgrade' hooks; requiring
    the 'remove' hooks here (as before) referenced sls files that are never
    included. #}
{%- for hook in pillar['state_hooks'].get('upgrade', {}).get('before', []) %}
      - sls: {{ hook }}
{%- endfor %}

include:
  - util.syncmodules
{%- for hook in pillar['state_hooks'].get('upgrade', {}).get('before', []) %}
  - {{ hook }}
{%- endfor %}
{%- for hook in pillar['state_hooks'].get('upgrade', {}).get('after', []) %}
  - {{ hook }}
{%- endfor %}
 0707010000004E000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000002300000000susemanager-sls/salt/configuration    0707010000004F000081B400000000000000000000000163F87E30000002D5000000000000000000000000000000000000003400000000susemanager-sls/salt/configuration/deploy_files.sls   {% if pillar.get('param_files', []) %}
{%- for file in pillar.get('param_files') %}

file_deploy_{{ loop.index }}:
{% if file.type == 'file' %}
    file.managed:
{% elif file.type == 'directory' %}
    file.directory:
{% elif file.type == 'symlink' %}
    file.symlink:
{% endif %}
    -   name: {{ file.name }}
    -   makedirs: True
{% if file.type == 'file' %}
    -   source: {{ file.source }}
    -   user: {{ file.user }}
    -   group: {{ file.group }}
    -   mode: {{ file.mode }}
{% elif file.type == 'directory' %}
    -   user: {{ file.user }}
    -   group: {{ file.group }}
    -   mode: {{ file.mode }}
{% elif file.type == 'symlink' %}
    -   target: {{ file.target }}
{% endif %}
{%- endfor %}
{% endif %}

   07070100000050000081B400000000000000000000000163F87E30000002D5000000000000000000000000000000000000003200000000susemanager-sls/salt/configuration/diff_files.sls {% if pillar.get('param_files', []) %}
{%- for file in pillar.get('param_files') %}

file_deploy_{{ loop.index }}:
{% if file.type == 'file' %}
    file.managed:
{% elif file.type == 'directory' %}
    file.directory:
{% elif file.type == 'symlink' %}
    file.symlink:
{% endif %}
    -   name: {{ file.name }}
    -   makedirs: True
{% if file.type == 'file' %}
    -   source: {{ file.source }}
    -   user: {{ file.user }}
    -   group: {{ file.group }}
    -   mode: {{ file.mode }}
{% elif file.type == 'directory' %}
    -   user: {{ file.user }}
    -   group: {{ file.group }}
    -   mode: {{ file.mode }}
{% elif file.type == 'symlink' %}
    -   target: {{ file.target }}
{% endif %}
{%- endfor %}
{% endif %}

   07070100000051000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001C00000000susemanager-sls/salt/custom   07070100000052000081B400000000000000000000000163F87E3000000036000000000000000000000000000000000000002500000000susemanager-sls/salt/custom/init.sls  include:
  - custom.custom_{{ grains['machine_id'] }}
  07070100000053000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000002300000000susemanager-sls/salt/custom_groups    07070100000054000081B400000000000000000000000163F87E3000000091000000000000000000000000000000000000002C00000000susemanager-sls/salt/custom_groups/init.sls   {% if pillar.get('group_ids', []) -%}
include:
{% for gid in pillar.get('group_ids', []) -%}
  - custom.group_{{ gid }}
{% endfor %}
{% endif %}
   07070100000055000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000002000000000susemanager-sls/salt/custom_org   07070100000056000081B400000000000000000000000163F87E3000000060000000000000000000000000000000000000002900000000susemanager-sls/salt/custom_org/init.sls  {% if pillar['org_id'] is defined %}
include:
  - custom.org_{{ pillar['org_id'] }}
{% endif %}
07070100000057000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000002100000000susemanager-sls/salt/distupgrade  07070100000058000081B400000000000000000000000163F87E3000000218000000000000000000000000000000000000002A00000000susemanager-sls/salt/distupgrade/init.sls {% if grains['os_family'] == 'Suse' %}
spmigration:
  mgrcompat.module_run:
    - name: pkg.upgrade
    - dist_upgrade: True
    - dryrun: {{ salt['pillar.get']('susemanager:distupgrade:dryrun', False) }}
{% if grains['osrelease_info'][0] >= 12 %}
    - novendorchange: {{ not salt['pillar.get']('susemanager:distupgrade:allowVendorChange', False) }}
{% else %}
    - fromrepo: {{ salt['pillar.get']('susemanager:distupgrade:channels', []) }}
{% endif %}
    -   require:
        - file: mgrchannels*
{% endif %}

include:
  - channels
07070100000059000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001E00000000susemanager-sls/salt/hardware 0707010000005A000081B400000000000000000000000163F87E30000010A4000000000000000000000000000000000000003000000000susemanager-sls/salt/hardware/profileupdate.sls   {%- if grains['cpuarch'] in ['i386', 'i486', 'i586', 'i686', 'x86_64', 'aarch64'] %}
mgr_install_dmidecode:
  pkg.installed:
{%- if grains['os_family'] == 'Suse' and grains['osrelease'] in ['11.3', '11.4'] %}
    - name: pmtools
{%- else %}
    - name: dmidecode
{%- endif %}
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
{%- endif %}

grains:
  mgrcompat.module_run:
    - name: grains.items
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
cpuinfo:
  mgrcompat.module_run:
    - name: status.cpuinfo
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
udev:
  mgrcompat.module_run:
    - name: udev.exportdb
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
network-interfaces:
  mgrcompat.module_run:
    - name: network.interfaces
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
network-ips:
  mgrcompat.module_run:
    - name: sumautil.primary_ips
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
      - saltutil: sync_modules
{%- else %}
      - mgrcompat: sync_states
      - mgrcompat: sync_modules
{%- endif %}
network-modules:
  mgrcompat.module_run:
    - name: sumautil.get_net_modules
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
      - saltutil: sync_modules
{%- else %}
      - mgrcompat: sync_states
      - mgrcompat: sync_modules
{%- endif %}

{% if grains['cpuarch'] in ['i386', 'i486', 'i586', 'i686', 'x86_64'] %}
smbios-records-bios:
  mgrcompat.module_run:
    - name: smbios.records
    - rec_type: 0
    - clean: False
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
smbios-records-system:
  mgrcompat.module_run:
    - name: smbios.records
    - rec_type: 1
    - clean: False
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
smbios-records-baseboard:
  mgrcompat.module_run:
    - name: smbios.records
    - rec_type: 2
    - clean: False
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
smbios-records-chassis:
  mgrcompat.module_run:
    - name: smbios.records
    - rec_type: 3
    - clean: False
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
{% elif grains['cpuarch'] in ['s390', 's390x'] %}
mainframe-sysinfo:
  mgrcompat.module_run:
    - name: mainframesysinfo.read_values
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
{% endif %}

{%- if grains['saltversioninfo'][0] >= 2018 %}
{% if 'mgrnet.dns_fqdns' in salt %}
dns_fqdns:
  mgrcompat.module_run:
    - name: mgrnet.dns_fqdns
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
    - onlyif:
        which host || which nslookup
{% endif%}
{% if 'network.fqdns' in salt %}
fqdns:
  mgrcompat.module_run:
    - name: network.fqdns
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
{% endif%}
{%- endif%}

include:
  - util.syncstates
  - util.syncmodules
0707010000005B000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001C00000000susemanager-sls/salt/images   0707010000005C000081B400000000000000000000000163F87E30000008B7000000000000000000000000000000000000002700000000susemanager-sls/salt/images/docker.sls    {% if grains['saltversioninfo'][0] >= 2018 %}

# Salt >= 2018 flow: log in to all configured registries, build the image,
# push it, and (when supported) log out again. Uses the modern "docker"
# execution module.
mgr_registries_login:
  mgrcompat.module_run:
    - name: docker.login
    # Cast to a list so the dict view renders as valid YAML under Python 3.
    - registries: {{ pillar.get('docker-registries', {}).keys() | list }}

mgr_buildimage:
  mgrcompat.module_run:
    - name: docker.build
{%- if pillar.get('imagerepopath') is defined %}
    - repository: "{{ pillar.get('imagerepopath') }}"
    - tag: "{{ pillar.get('imagetag', 'latest') }}"
{%- else %}
    - repository: "{{ pillar.get('imagename') }}"
    # NOTE(review): assumes 'imagename' always contains a ':tag' suffix here;
    # rsplit would raise IndexError otherwise — confirm the server guarantees it.
    - tag: "{{ pillar.get('imagename').rsplit(':', 1)[1] }}"
{%- endif %}
    - path: "{{ pillar.get('builddir') }}"
    # Build args give the Dockerfile access to the assigned channel repo and
    # the SUSE Manager CA certificate.
    - buildargs:
        repo: "{{ pillar.get('repo') }}"
        cert: "{{ pillar.get('cert') }}"
{%- if pillar.get('customvalues') is defined %}
{%- for key, value in pillar.get('customvalues').items() %}
        {{key}}: "{{value}}"
{%- endfor %}
{%- endif %}
    - require:
      - mgrcompat: mgr_registries_login

mgr_pushimage:
  mgrcompat.module_run:
    - name: docker.push
    - image: "{{ pillar.get('imagename') }}"
    - require:
      - mgrcompat: mgr_buildimage
      - mgrcompat: mgr_registries_login

# docker.logout only exists on newer Salt; skip the cleanup where unavailable.
{% if 'docker.logout' in salt %}

mgr_registries_logout:
  mgrcompat.module_run:
    - name: docker.logout
    - registries: {{ pillar.get('docker-registries', {}).keys() | list }}
    - require:
      - mgrcompat: mgr_pushimage
      - mgrcompat: mgr_registries_login

{% endif %}

{% else %}

# Salt < 2018 flow: registry login via the legacy dockerng module.
mgr_registries_login:
  mgrcompat.module_run:
    - name: dockerng.login
    # Cast the dict view to a real list: under Python 3 ".keys()" renders as
    # "dict_keys([...])", which is not valid YAML. Under Python 2 the cast is
    # a no-op, and it matches the Salt >= 2018 branch above.
    - registries: {{ pillar.get('docker-registries', {}).keys() | list }}

# Legacy build/push flow using the dockerng execution module (Salt < 2018).
mgr_buildimage:
  mgrcompat.module_run:
    - name: dockerng.build
    - image: "{{ pillar.get('imagename') }}"
    - path: "{{ pillar.get('builddir') }}"
    # Build args expose the channel repo and the SUSE Manager CA certificate
    # to the Dockerfile, plus any custom key/value pairs from pillar.
    - buildargs:
        repo: "{{ pillar.get('repo') }}"
        cert: "{{ pillar.get('cert') }}"
{%- if pillar.get('customvalues') is defined %}
{%- for key, value in pillar.get('customvalues').items() %}
        {{key}}: "{{value}}"
{%- endfor %}
{%- endif %}
    - require:
      - mgrcompat: mgr_registries_login

mgr_pushimage:
  mgrcompat.module_run:
    - name: dockerng.push
    - image: "{{ pillar.get('imagename') }}"
    - require:
      - mgrcompat: mgr_buildimage
      - mgrcompat: mgr_registries_login

{% endif %}
 0707010000005D000081B400000000000000000000000163F87E300000163D000000000000000000000000000000000000003100000000susemanager-sls/salt/images/kiwi-image-build.sls  # SUSE Manager for Retail build trigger
#

{%- set source     = pillar.get('source') %}

{%- set kiwi_dir   = '/var/lib/Kiwi/' %}
{%- set common_repo = kiwi_dir + 'repo' %}

# Per-build working directories, all rooted under the pillar-provided build_id.
{%- set root_dir   = kiwi_dir + pillar.get('build_id') %}
{%- set source_dir = root_dir + '/source' %}
{%- set chroot_dir = root_dir + '/chroot/' %}
{%- set dest_dir   = root_dir + '/images.build' %}
{%- set bundle_dir = root_dir + '/images/' %}
# cache dir is used only with Kiwi-ng
{%- set cache_dir  = root_dir + '/cache/' %}
{%- set bundle_id  = pillar.get('build_id') %}
{%- set activation_key = pillar.get('activation_key') %}

# on SLES11 and SLES12 use legacy Kiwi, use Kiwi NG elsewhere
{%- set use_kiwi_ng = not (salt['grains.get']('osfullname') == 'SLES' and salt['grains.get']('osmajorrelease')|int() < 15) %}

# Recreate the build root (clean: True wipes leftovers of previous builds)
# and fetch/unpack the image description into it.
mgr_buildimage_prepare_source:
  file.directory:
    - name: {{ root_dir }}
    - clean: True
  mgrcompat.module_run:
    - name: kiwi_source.prepare_source
    - source: {{ source }}
    - root: {{ root_dir }}

# Bake the activation key into the image as a minion grain so systems booted
# from this image identify themselves with the right key.
mgr_buildimage_prepare_activation_key_in_source:
  file.managed:
    - name: {{ source_dir }}/root/etc/salt/minion.d/kiwi_activation_key.conf
    - makedirs: True
    - contents: |
        grains:
          susemanager:
            activation_key: {{ activation_key }}

{%- if use_kiwi_ng %}
# KIWI NG
#
{%- set kiwi = 'kiwi-ng' %}

{%- set kiwi_options = pillar.get('kiwi_options', '') %}
{%- set bootstrap_packages = ['findutils', 'rhn-org-trusted-ssl-cert-osimage'] %}

# Common repo arguments for Kiwi NG: always add the local common repo (with
# the SUSE Manager certificate RPM) plus each pillar-provided channel repo.
{%- macro kiwi_params() -%}
  --ignore-repos-used-for-build --add-repo file:{{ common_repo }},rpm-dir,common_repo,90,false,false
{% for pkg in bootstrap_packages -%}
  --add-bootstrap-package {{ pkg }}
{% endfor -%}
{%- for repo in pillar.get('kiwi_repositories') -%}
  --add-repo {{ repo }},rpm-md,key_repo{{ loop.index }},90,false,false {{ ' ' }}
{%- endfor -%}
{%- endmacro %}

# Three-step Kiwi NG build: prepare the chroot, create the image, bundle it.
mgr_buildimage_kiwi_prepare:
  cmd.run:
    - name: "{{ kiwi }} --logfile={{ root_dir }}/prepare.log --shared-cache-dir={{ cache_dir }} {{ kiwi_options }} system prepare --description {{ source_dir }} --root {{ chroot_dir }} {{ kiwi_params() }}"
    - require:
      - mgrcompat: mgr_buildimage_prepare_source
      - file: mgr_buildimage_prepare_activation_key_in_source

mgr_buildimage_kiwi_create:
  cmd.run:
    - name: "{{ kiwi }} --logfile={{ root_dir }}/create.log --shared-cache-dir={{ cache_dir }} {{ kiwi_options }} system create --root {{ chroot_dir }} --target-dir  {{ dest_dir }}"
    - require:
      - cmd: mgr_buildimage_kiwi_prepare

mgr_buildimage_kiwi_bundle:
  cmd.run:
    - name: "{{ kiwi }} result bundle --target-dir {{ dest_dir }} --id {{ bundle_id }} --bundle-dir {{ bundle_dir }}"
    - require:
      - cmd: mgr_buildimage_kiwi_create

{%- else %}
# KIWI Legacy
#

# Probe the installed Kiwi for --bundle-build support (SLE11 lacks it).
{%- set kiwi_help = salt['cmd.run']('kiwi --help') %}
{%- set have_bundle_build = kiwi_help.find('--bundle-build') > 0 %}

# i586 build on x86_64 host must be called with linux32
# let's consider the build i586 if there is no x86_64 repo specified
{%- set kiwi = 'linux32 kiwi' if (pillar.get('kiwi_repositories')|join(' ')).find('x86_64') == -1 and grains.get('osarch') == 'x86_64' else 'kiwi' %}

# in SLES11 Kiwi the --add-repotype is required
{%- macro kiwi_params() -%}
  --add-repo {{ common_repo }} --add-repotype rpm-dir --add-repoalias common_repo {{ ' ' }}
{%- for repo in pillar.get('kiwi_repositories') -%}
  --add-repo {{ repo }} --add-repotype rpm-md --add-repoalias key_repo{{ loop.index }} {{ ' ' }}
{%- endfor -%}
{%- endmacro %}

# old Kiwi can't change cache location, so we have to clear cache before each build
mgr_kiwi_clear_cache:
  file.directory:
    - name: /var/cache/kiwi/
    - makedirs: True
    - clean: True

mgr_buildimage_kiwi_prepare:
  cmd.run:
    - name: "{{ kiwi }} --nocolor --force-new-root --prepare {{ source_dir }} --root {{ chroot_dir }} {{ kiwi_params() }}"
    - require:
      - mgrcompat: mgr_buildimage_prepare_source
      - file: mgr_buildimage_prepare_activation_key_in_source

mgr_buildimage_kiwi_create:
  cmd.run:
    - name: "{{ kiwi }} --nocolor --yes --create {{ chroot_dir }} --dest {{ dest_dir }} {{ kiwi_params() }}"
    - require:
      - cmd: mgr_buildimage_kiwi_prepare

{%- if have_bundle_build %}
mgr_buildimage_kiwi_bundle:
  cmd.run:
    - name: "{{ kiwi }} --nocolor --yes --bundle-build {{ dest_dir }} --bundle-id {{ bundle_id }} --destdir {{ bundle_dir }}"
    - require:
      - cmd: mgr_buildimage_kiwi_create

{%- else %}

# SLE11 Kiwi does not have --bundle-build option, we have to create the bundle tarball ourselves:

mgr_buildimage_kiwi_bundle_dir:
  file.directory:
    - name: {{ bundle_dir }}
    - require:
      - cmd: mgr_buildimage_kiwi_create

# Tar up the flat build results, named after the .packages file plus bundle id.
mgr_buildimage_kiwi_bundle_tarball:
  cmd.run:
    - name: "cd '{{ dest_dir }}' && tar czf '{{ bundle_dir }}'`basename *.packages .packages`-{{ bundle_id }}.tgz --no-recursion `find . -maxdepth 1 -type f`"
    - require:
      - file: mgr_buildimage_kiwi_bundle_dir

# The final state keeps the 'mgr_buildimage_kiwi_bundle' id so later states
# can require it regardless of which bundling path was taken.
mgr_buildimage_kiwi_bundle:
  cmd.run:
    - name: "cd '{{ bundle_dir }}' && sha256sum *.tgz > `echo *.tgz`.sha256"
    - require:
      - cmd: mgr_buildimage_kiwi_bundle_tarball

{%- endif %}

{%- endif %}


{%- if pillar.get('use_salt_transport') %}
# Push the finished bundle back to the master over the Salt fileserver.
mgr_buildimage_kiwi_collect_image:
  mgrcompat.module_run:
    - name: cp.push_dir
    - path: {{ bundle_dir }}
    - require:
      - cmd: mgr_buildimage_kiwi_bundle
{%- endif %}

# Report image metadata back to SUSE Manager. The requisites below
# intentionally omit the state-module prefix so they match whichever of the
# states above actually ran.
mgr_buildimage_info:
  mgrcompat.module_run:
    - name: kiwi_info.image_details
    - dest: {{ dest_dir }}
    - bundle_dest: {{ bundle_dir }}
    - require:
{%- if pillar.get('use_salt_transport') %}
      - mgr_buildimage_kiwi_collect_image
{%- else %}
      - mgr_buildimage_kiwi_bundle
{%- endif %}
   0707010000005E000081B400000000000000000000000163F87E300000025C000000000000000000000000000000000000003300000000susemanager-sls/salt/images/kiwi-image-inspect.sls    # SUSE Manager for Retail build trigger
#
{%- set root_dir   = '/var/lib/Kiwi/' + pillar.get('build_id') %}
{%- set dest_dir   = root_dir + '/images.build' %}
{%- set bundle_dir = root_dir + '/images/' %}
# NOTE(review): bundle_id is set but not referenced in this state file.
{%- set bundle_id  = pillar.get('build_id') %}

# the goal is to collect all information required for
# saltboot image pillar

mgr_inspect_kiwi_image:
  mgrcompat.module_run:
    - name: kiwi_info.inspect_image
    - dest: {{ dest_dir }}
    - bundle_dest: {{ bundle_dir }}

# Remove the whole per-build working tree, but only after inspection succeeded.
mgr_kiwi_cleanup:
  cmd.run:
    - name: "rm -rf '{{ root_dir }}'"
    - require:
      - mgrcompat: mgr_inspect_kiwi_image
0707010000005F000081B400000000000000000000000163F87E30000009D1000000000000000000000000000000000000002E00000000susemanager-sls/salt/images/profileupdate.sls {% set container_name = salt['pillar.get']('mgr_container_name', 'mgr_container_' ~ range(1, 10000) | random )  %}

{% if grains['saltversioninfo'][0] >= 2018 %}

# Salt >= 2018: inspect a container image with the modern "docker" module.
# A throwaway container (random name unless pillar provides one) is used to
# run packages.profileupdate in dry-run mode, then image and container are
# removed again.
mgr_registries_login_inspect:
  mgrcompat.module_run:
    - name: docker.login
    # Cast to a list so the dict view renders as valid YAML under Python 3.
    - registries: {{ pillar.get('docker-registries', {}).keys() | list }}

mgr_image_profileupdate:
  mgrcompat.module_run:
    - name: docker.sls_build
    - repository: "{{ container_name }}"
    - base: "{{ pillar.get('imagename') }}"
    - mods: packages.profileupdate
    # dryrun: collect the state results without committing a new image.
    - dryrun: True
    - kwargs:
        entrypoint: ""
    - require:
      - mgrcompat: mgr_registries_login_inspect

mgr_image_inspect:
  mgrcompat.module_run:
    - name: docker.inspect_image
    # m_name avoids clashing with module_run's own 'name' argument.
    - m_name: "{{ pillar.get('imagename') }}"
    - require:
      - mgrcompat: mgr_registries_login_inspect

# Cleanup: remove the temporary container (if it still exists) and the image.
mgr_container_remove:
  mgrcompat.module_run:
    - name: docker.rm
    - args: [ "{{ container_name }}" ]
    - force: False
    - onlyif:
      - docker ps -a | grep "{{ container_name }}" >/dev/null

mgr_image_remove:
  mgrcompat.module_run:
    - name: docker.rmi
    - m_names:
      - "{{ pillar.get('imagename') }}"
    - force: False

{% if 'docker.logout' in salt %}

mgr_registries_logout:
  mgrcompat.module_run:
    - name: docker.logout
    - registries: {{ pillar.get('docker-registries', {}).keys() | list }}
    - require:
      - mgrcompat: mgr_registries_login_inspect
      - mgrcompat: mgr_image_profileupdate

{% endif %}

{% else %}

# Salt < 2018: registry login via the legacy dockerng module.
mgr_registries_login_inspect:
  mgrcompat.module_run:
    - name: dockerng.login
    # Cast the dict view to a real list: under Python 3 ".keys()" renders as
    # "dict_keys([...])", which is not valid YAML. Under Python 2 the cast is
    # a no-op, and it matches the Salt >= 2018 branch above.
    - registries: {{ pillar.get('docker-registries', {}).keys() | list }}

# Legacy inspection flow using the dockerng module (Salt < 2018).
mgr_image_profileupdate:
  mgrcompat.module_run:
    - name: dockerng.sls_build
    # m_name avoids clashing with module_run's own 'name' argument.
    - m_name: "{{ container_name }}"
    - base: "{{ pillar.get('imagename') }}"
    - mods: packages.profileupdate
    - dryrun: True
    - kwargs:
        entrypoint: ""
    - require:
      - mgrcompat: mgr_registries_login_inspect

mgr_image_inspect:
  mgrcompat.module_run:
    - name: dockerng.inspect
    - m_name: "{{ pillar.get('imagename') }}"
    - require:
      - mgrcompat: mgr_registries_login_inspect

# Cleanup: remove the temporary container (if it still exists) and the image.
mgr_container_remove:
  mgrcompat.module_run:
    - name: dockerng.rm
    - args: [ "{{ container_name }}" ]
    - force: False
    - onlyif:
      - docker ps -a | grep "{{ container_name }}" >/dev/null

mgr_image_remove:
  mgrcompat.module_run:
    - name: dockerng.rmi
    - m_names:
      - "{{ pillar.get('imagename') }}"
    - force: False
{% endif %}

# Ensure custom states are synced before the states above are evaluated.
include:
  - util.syncstates
   07070100000060000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001E00000000susemanager-sls/salt/packages 07070100000061000081B400000000000000000000000163F87E30000001E1000000000000000000000000000000000000002700000000susemanager-sls/salt/packages/init.sls    {%- if grains['os_family'] == 'Suse' and grains['osmajorrelease']|int > 11 and not grains['oscodename'] == 'openSUSE Leap 15.3'%}
# Install all SUSE products for assigned channels (SUSE >= 12 only; the
# guarding conditional is opened at the top of this file). Skipped on
# openSUSE Leap 15.3 per the same conditional.
mgr_install_products:
  product.all_installed:
    - refresh: True
    - require:
      - file: mgrchannels_*
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
{%- endif %}

include:
  - util.syncstates
  - .packages_{{ grains['machine_id'] }}
   07070100000062000081B400000000000000000000000163F87E3000000158000000000000000000000000000000000000003000000000susemanager-sls/salt/packages/patchdownload.sls   {% if pillar.get('param_patches', []) %}
# Pre-download the patches listed in pillar 'param_patches' (guarding
# conditional on the preceding line); channels must be applied first so the
# advisories can be resolved.
pkg_downloaded-patches:
  pkg.patch_downloaded:
    - advisory_ids:
{%- for patch in pillar.get('param_patches', []) %}
      - {{ patch }}
{%- endfor %}
    - require:
      - mgrcompat: applychannels
{% endif %}

# Apply the 'channels' state first so patch downloads resolve advisories
# against the freshly configured SUSE Manager repositories.
# Indentation normalized to the two-space style used by the sibling
# packages/*.sls states (rendered YAML is behavior-identical).
applychannels:
  mgrcompat.module_run:
    - name: state.apply
    - mods: channels
07070100000063000081B400000000000000000000000163F87E30000004D3000000000000000000000000000000000000002F00000000susemanager-sls/salt/packages/patchinstall.sls    {% if grains.get('saltversioninfo', []) < [2015, 8, 12] %}
# Abort rendering on minions older than 2015.8.12 (guard opened on the line
# above): they cannot apply patch states.
{{ salt.test.exception("You are running an old version of salt-minion that does not support patching. Please update salt-minion and try again.") }}
{% endif %}

# Update-stack patches (salt/zypper stack) are installed first, in their own
# transaction, so the regular patches below run with an updated stack.
{% if pillar.get('param_update_stack_patches', []) %}
mgr_update_stack_patches:
  pkg.patch_installed:
    - refresh: true
    - advisory_ids:
{%- for patch in pillar.get('param_update_stack_patches', []) %}
      - {{ patch }}
{%- endfor %}
    - diff_attr: ['epoch', 'version', 'release', 'arch', 'install_date_time_t']
    - require:
        - file: mgrchannels*
{% endif %}

{% if pillar.get('param_regular_patches', []) %}
mgr_regular_patches:
  pkg.patch_installed:
# Refresh only if the update-stack state above did not refresh already.
{% if not pillar.get('param_update_stack_patches', []) %}
    - refresh: true
{% endif %}
    - novendorchange:  {{ not pillar.get('allow_vendor_change', False) }}
    - advisory_ids:
{%- for patch in pillar.get('param_regular_patches', []) %}
      - {{ patch }}
{%- endfor %}
    - diff_attr: ['epoch', 'version', 'release', 'arch', 'install_date_time_t']
    - require:
        - file: mgrchannels*
{% if pillar.get('param_update_stack_patches', []) %}
        - pkg: mgr_update_stack_patches
{% endif %}
{% endif %}

include:
  - channels
 07070100000064000081B400000000000000000000000163F87E3000000207000000000000000000000000000000000000002E00000000susemanager-sls/salt/packages/pkgdownload.sls {% if pillar.get('param_pkgs') %}
# Pre-download the packages in pillar 'param_pkgs' — a list of
# [name, arch, version] triples (guard opened on the preceding line).
pkg_downloaded:
  pkg.downloaded:
    - pkgs:
{%- for pkg, arch, version in pillar.get('param_pkgs', []) %}
    {%- if grains['os_family'] == 'Debian' %}
        - {{ pkg }}:{{ arch }}: {{ version }}
    {%- elif grains.get('__suse_reserved_pkg_all_versions_support', False) %}
        - {{ pkg }}.{{ arch }}: {{ version }}
    {%- else %}
        - {{ pkg }}: {{ version }}
    {%- endif %}
{%- endfor %}
    - require:
        - file: mgrchannels*
{% endif %}

include:
  - channels
 07070100000065000081B400000000000000000000000163F87E30000002F6000000000000000000000000000000000000002D00000000susemanager-sls/salt/packages/pkginstall.sls  {% if pillar.get('param_pkgs') %}
pkg_installed:
  pkg.installed:
    -   refresh: true
{%- if grains['os_family'] == 'Debian' %}
    - skip_verify: {{ not pillar.get('mgr_metadata_signing_enabled', false) }}
{%- endif %}
    -   pkgs:
{%- for pkg, arch, version in pillar.get('param_pkgs', []) %}
    {%- if grains['os_family'] == 'Debian' %}
        - {{ pkg }}:{{ arch }}: {{ version }}
    {%- elif grains.get('__suse_reserved_pkg_all_versions_support', False) %}
        - {{ pkg }}.{{ arch }}: {{ version }}
    {%- else %}
        - {{ pkg }}: {{ version }}
    {%- endif %}

{%- endfor %}
    - diff_attr: ['epoch', 'version', 'release', 'arch', 'install_date_time_t']
    -   require:
        - file: mgrchannels*
{% endif %}

include:
  - channels
  07070100000066000081B400000000000000000000000163F87E30000000E1000000000000000000000000000000000000002A00000000susemanager-sls/salt/packages/pkglock.sls pkg_locked:
  pkg.held:
    - replace: True
{% if pillar.get('param_pkgs') %}
    - pkgs:
{%- for pkg, arch, version in pillar.get('param_pkgs', []) %}
        - {{ pkg }}
{%- endfor %}
{%- else %}
    - pkgs: []
{% endif %}
   07070100000067000081B400000000000000000000000163F87E3000000417000000000000000000000000000000000000002C00000000susemanager-sls/salt/packages/pkgremove.sls   {% if pillar.get('param_pkgs') %}
# Remove the packages in pillar 'param_pkgs' ([name, arch, version] triples;
# guard opened on the preceding line). Entry format mirrors pkginstall.sls.
pkg_removed:
  pkg.removed:
    -   pkgs:
{%- for pkg, arch, version in pillar.get('param_pkgs', []) %}
    {%- if grains['os_family'] == 'Debian' %}
        - {{ pkg }}:{{ arch }}: {{ version }}
    {%- elif grains.get('__suse_reserved_pkg_all_versions_support', False) %}
        - {{ pkg }}.{{ arch }}: {{ version }}
    {%- else %}
        - {{ pkg }}: {{ version }}
    {%- endif %}
{%- endfor %}
    -   require:
        - file: mgrchannels*
{% endif %}

# Duplicate entries get one state each (state IDs must be unique, so they
# cannot share the single pkgs list above).
{% if pillar.get('param_pkgs_duplicates') %}
{% for pkg, arch, version in pillar["param_pkgs_duplicates"] %}
pkg_removed_dup_{{ loop.index0 }}:
  pkg.removed:
    -   pkgs:
    {%- if grains['os_family'] == 'Debian' %}
        - {{ pkg }}:{{ arch }}: {{ version }}
    {%- elif grains.get('__suse_reserved_pkg_all_versions_support', False) %}
        - {{ pkg }}.{{ arch }}: {{ version }}
    {%- else %}
        - {{ pkg }}: {{ version }}
    {%- endif %}
    -   require:
        - file: mgrchannels*

{% endfor %}
{% endif %}

include:
  - channels
 07070100000068000081B400000000000000000000000163F87E300000014B000000000000000000000000000000000000002C00000000susemanager-sls/salt/packages/pkgupdate.sls   include:
  - channels

mgr_pkg_update:
  pkg.uptodate:
    - refresh: True
{%- if grains['os_family'] == 'Debian' %}
    - skip_verify: {{ not pillar.get('mgr_metadata_signing_enabled', false) }}
{%- endif %}
    - diff_attr: ['epoch', 'version', 'release', 'arch', 'install_date_time_t']
    - require:
      - file: mgrchannels*
 07070100000069000081B400000000000000000000000163F87E3000000519000000000000000000000000000000000000003000000000susemanager-sls/salt/packages/profileupdate.sls   packages:
  mgrcompat.module_run:
    # Gather the full installed-package inventory (state id on the line above).
    - name: pkg.info_installed
    - kwargs: {
          attr: 'status,arch,epoch,version,release,install_date_time_t',
{%- if grains.get('__suse_reserved_pkg_all_versions_support', False) %}
          errors: report,
          all_versions: true
{%- else %}
          errors: report
{%- endif %}
      }
# OS product/release detection, per OS family.
{% if grains['os_family'] == 'Suse' %}
products:
  mgrcompat.module_run:
    - name: pkg.list_products
{% elif grains['os_family'] == 'RedHat' %}
{% include 'packages/redhatproductinfo.sls' %}
{% elif grains['os_family'] == 'Debian' %}
debianrelease:
  cmd.run:
    - name: cat /etc/os-release
    - onlyif: test -f /etc/os-release
{% endif %}

include:
  - util.syncgrains
  - util.syncstates
  - util.syncmodules

# Refresh grains as part of the profile update.
grains_update:
  mgrcompat.module_run:
    - name: grains.items
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_grains
{%- else %}
      - mgrcompat: sync_grains
{%- endif %}

# Live-patch kernel version is only relevant for real systems, not for
# container image inspection runs (which set the 'imagename' pillar).
{% if not pillar.get('imagename') %}
kernel_live_version:
  mgrcompat.module_run:
    - name: sumautil.get_kernel_live_version
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_modules
{%- else %}
      - mgrcompat: sync_modules
{%- endif %}
{% endif %}
   0707010000006A000081B400000000000000000000000163F87E30000003E0000000000000000000000000000000000000003400000000susemanager-sls/salt/packages/redhatproductinfo.sls   {% if grains['os_family'] == 'RedHat' %}
# Dump each RedHat-family release file that exists so the server can detect
# the exact product (guard for os_family RedHat opened on the preceding line).
rhelrelease:
  cmd.run:
    - name: cat /etc/redhat-release
    # Skip when redhat-release is just a symlink to a derivative's file.
    - onlyif: test -f /etc/redhat-release -a ! -L /etc/redhat-release
alibabarelease:
  cmd.run:
    - name: cat /etc/alinux-release
    - onlyif: test -f /etc/alinux-release
centosrelease:
  cmd.run:
    - name: cat /etc/centos-release
    - onlyif: test -f /etc/centos-release
oraclerelease:
  cmd.run:
    - name: cat /etc/oracle-release
    - onlyif: test -f /etc/oracle-release
amazonrelease:
  cmd.run:
    - name: cat /etc/system-release
    # system-release exists on several distros; only report it for Amazon.
    - onlyif: test -f /etc/system-release && grep -qi Amazon /etc/system-release
almarelease:
  cmd.run:
    - name: cat /etc/almalinux-release
    - onlyif: test -f /etc/almalinux-release
rockyrelease:
  cmd.run:
    - name: cat /etc/rocky-release
    - onlyif: test -f /etc/rocky-release
# Detect RES (Expanded Support) via its release-server provider package.
respkgquery:
  cmd.run:
    - name: rpm -q --whatprovides 'sles_es-release-server'
    - onlyif: rpm -q --whatprovides 'sles_es-release-server'
{% endif %}
0707010000006B000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000002400000000susemanager-sls/salt/remotecommands   0707010000006C000081B400000000000000000000000163F87E30000000DE000000000000000000000000000000000000002D00000000susemanager-sls/salt/remotecommands/init.sls  remote_command:
  cmd.script:
    # Run the server-provided remote-command script (state id on the line above).
    - source: {{ pillar.get('mgr_remote_cmd_script') }}
    - runas: {{ pillar.get('mgr_remote_cmd_runas', 'root') }}
    # NOTE(review): no default for the timeout pillar — presumably always set
    # by the server; verify before relying on it.
    - timeout: {{ pillar.get('mgr_remote_cmd_timeout') }}
    # TODO GID

  0707010000006D000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001A00000000susemanager-sls/salt/scap 0707010000006E000081B400000000000000000000000163F87E30000005ED000000000000000000000000000000000000002300000000susemanager-sls/salt/scap/init.sls    mgr_scap:
  mgrcompat.module_run:
# Run an OpenSCAP evaluation (state id on the line above). Old minions only
# provide openscap.xccdf with a raw parameter string; newer ones provide
# openscap.xccdf_eval with structured arguments.
{%- if "openscap.xccdf_eval" not in salt %}
    - name: openscap.xccdf
    - params: {{ pillar.get('mgr_scap_params')['old_parameters'] }}
{%- else %}
    - name: openscap.xccdf_eval
    - xccdffile: {{ pillar['mgr_scap_params']['xccdffile'] }}
    {%- if "ovalfiles" in pillar.get('mgr_scap_params') %}
    - ovalfiles:
      {%- for oval in pillar['mgr_scap_params']['ovalfiles'] %}
        - {{ oval }}
      {%- endfor %}
    {%- endif %}
    # Optional parameters are forwarded only when present in the pillar.
    - kwargs:
        results: results.xml
        report: report.html
        oval_results: True
        {%- if "profile" in pillar.get('mgr_scap_params') %}
        profile: {{ pillar['mgr_scap_params']['profile'] }}
        {%- endif %}
        {%- if "rule" in pillar.get('mgr_scap_params') %}
        rule: {{ pillar['mgr_scap_params']['rule'] }}
        {%- endif %}
        {%- if "remediate" in pillar.get('mgr_scap_params') %}
        remediate: {{ pillar['mgr_scap_params']['remediate'] }}
        {%- endif %}
        {%- if "fetch_remote_resources" in pillar.get('mgr_scap_params') %}
        fetch_remote_resources: {{ pillar['mgr_scap_params']['fetch_remote_resources'] }}
        {%- endif %}
        {%- if "tailoring_file" in pillar.get('mgr_scap_params') %}
        tailoring_file: {{ pillar['mgr_scap_params']['tailoring_file'] }}
        {%- endif %}
        {%- if "tailoring_id" in pillar.get('mgr_scap_params') %}
        tailoring_id: {{ pillar['mgr_scap_params']['tailoring_id'] }}
        {%- endif %}
{% endif %}
   0707010000006F000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001E00000000susemanager-sls/salt/services 07070100000070000081B400000000000000000000000163F87E3000000416000000000000000000000000000000000000002900000000susemanager-sls/salt/services/docker.sls  {% if pillar['addon_group_types'] is defined and 'container_build_host' in pillar['addon_group_types'] %}
# Container build host setup (guard for the 'container_build_host' entitlement
# opened on the preceding line): docker engine, python bindings, salt minimum.
mgr_install_docker:
  pkg.installed:
    - pkgs:
      - git-core
      - docker: '>=1.9.0'
# Pick the docker python binding matching the minion's Python and OS release.
{%- if grains['pythonversion'][0] == 3 %}
    {%- if grains['osmajorrelease'] == 12 %}
      - python3-docker-py: '>=1.6.0'
    {%- else %}
      - python3-docker: '>=1.6.0'
    {%- endif %}
{%- else %}
      - python-docker-py: '>=1.6.0'
{%- endif %}
{%- if grains['saltversioninfo'][0] >= 2018 %}
      - python3-salt
    # Pre-3002 Salt may still need the python2 stack when it is available.
    {%- if grains['saltversioninfo'][0] < 3002 and salt['pkg.info_available']('python-Jinja2', 'python2-Jinja2') and salt['pkg.info_available']('python', 'python2') and salt['pkg.info_available']('python2-salt') %}
      - python2-salt
    {%- endif %}
{%- endif %}

mgr_docker_service:
  service.running:
    - name: docker
    - enable: True
    - require:
      - pkg: mgr_install_docker

# Make sure a recent-enough salt is installed last, after everything else.
mgr_min_salt:
  pkg.installed:
    - pkgs:
      - salt: '>=2016.11.1'
      - salt-minion: '>=2016.11.1'
    - order: last
{% endif %}
  07070100000071000081B400000000000000000000000163F87E3000000A79000000000000000000000000000000000000003400000000susemanager-sls/salt/services/kiwi-image-server.sls   # Image Server installation state - part of SUSE Manager for Retail
#
# Copyright (c) 2017 - 2021 SUSE LLC

# Only set up OS image build hosts (entitlement checked via pillar).
{% if pillar['addon_group_types'] is defined and 'osimage_build_host' in pillar['addon_group_types'] %}
{% set kiwi_dir = '/var/lib/Kiwi' %}

# on SLES11 and SLES12 use legacy Kiwi, use Kiwi NG elsewhere
{%- set use_kiwi_ng = not (salt['grains.get']('osfullname') == 'SLES' and salt['grains.get']('osmajorrelease')|int() < 15) %}
# Probe the repos for optional kiwi sub-packages so only available ones are requested.
{%- set available_packages = salt['pkg.search']('kiwi').keys() %}

{%- if use_kiwi_ng %}
mgr_install_kiwi:
  pkg.installed:
    - pkgs:
      - python3-kiwi
{%- if 'kiwi-systemdeps-disk-images' in available_packages %}
      - kiwi-systemdeps-disk-images
      - kiwi-systemdeps-image-validation
      - kiwi-systemdeps-iso-media
{%- endif %}
{%- if 'kiwi-systemdeps-containers' in available_packages %}
      - kiwi-systemdeps-containers
{%- endif %}
      - kiwi-boot-descriptions
{%- else %}
{% set kiwi_boot_modules = ['kiwi-desc-netboot', 'kiwi-desc-saltboot', 'kiwi-desc-vmxboot', 'kiwi-desc-oemboot', 'kiwi-desc-isoboot'] %}

mgr_install_kiwi:
  pkg.installed:
    - pkgs:
      - kiwi
{% for km in kiwi_boot_modules %}
    {% if km in available_packages %}
      - {{ km }}
    {% endif %}
{% endfor %}
{% endif %}

mgr_kiwi_build_tools:
  pkg.installed:
    - pkgs:
      - git-core

mgr_kiwi_dir_created:
  file.directory:
    - name: {{ kiwi_dir }}
    - user: root
    - group: root
    - dir_mode: 755

# repo for common kiwi build needs - mainly RPM with SUSE Manager certificate
mgr_kiwi_dir_repo_created:
  file.directory:
    - name: {{ kiwi_dir }}/repo
    - user: root
    - group: root
    - dir_mode: 755
# Deploy the SUSE Manager CA certificate RPM into the common kiwi repo;
# SLE11 needs a specially-built variant of the package.
mgr_osimage_cert_deployed:
  file.managed:
# Cast the grain to a string before comparing: 'osmajorrelease' is an integer
# on Salt >= 2017.7 (this file already guards against that with the int cast
# above), so comparing the raw grain against the string '11' would never
# match there. The cast makes the check work for both grain types.
{%- if grains.get('osfullname') == 'SLES' and grains.get('osmajorrelease')|string == '11' %}
    - name: {{ kiwi_dir }}/repo/rhn-org-trusted-ssl-cert-osimage-sle11-1.0-1.noarch.rpm
    - source: salt://images/rhn-org-trusted-ssl-cert-osimage-sle11-1.0-1.noarch.rpm
{%- else %}
    - name: {{ kiwi_dir }}/repo/rhn-org-trusted-ssl-cert-osimage-1.0-1.noarch.rpm
    - source: salt://images/rhn-org-trusted-ssl-cert-osimage-1.0-1.noarch.rpm
{%- endif %}

# The build host must accept SSH from the server (image sources and results
# are exchanged over SSH unless the salt transport is used).
mgr_sshd_installed_enabled:
  pkg.installed:
    - name: openssh
  service.running:
    - name: sshd
    - enable: True

mgr_sshd_public_key_copied:
  file.append:
    - name: /root/.ssh/authorized_keys
    - source: salt://salt_ssh/mgr_ssh_id.pub
    - makedirs: True
    - require:
      - pkg: mgr_sshd_installed_enabled

# Sync all custom modules/states, natively where the minion supports
# saltutil states, via the mgrcompat shim otherwise.
mgr_saltutil_synced:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
  saltutil.sync_all
{%- else %}
  mgrcompat.module_run:
    - name: saltutil.sync_all
{%- endif %}

{% endif %}
   07070100000072000081B400000000000000000000000163F87E3000000644000000000000000000000000000000000000002E00000000susemanager-sls/salt/services/salt-minion.sls {% include 'bootstrap/remove_traditional_stack.sls' %}
{%- set salt_minion_name = 'salt-minion' %}
{%- set susemanager_minion_config = '/etc/salt/minion.d/susemanager.conf' %}
{# Prefer venv-salt-minion if installed #}
{%- if salt['pkg.version']('venv-salt-minion') %}
{%- set salt_minion_name = 'venv-salt-minion' %}
{%- set susemanager_minion_config = '/etc/venv-salt-minion/minion.d/susemanager.conf' %}
{%- endif -%}

{# Regular (non-ssh) minions: keep the minion package, config and service up. #}
{%- if salt['pillar.get']('contact_method') not in ['ssh-push', 'ssh-push-tunnel'] %}

mgr_salt_minion_inst:
  pkg.installed:
    - name: {{ salt_minion_name }}
    - order: last

{{ susemanager_minion_config }}:
  file.managed:
    - source:
      - salt://bootstrap/susemanager.conf
    - template: jinja
    - mode: 644
    - order: last
    - require:
      - pkg: mgr_salt_minion_inst

mgr_salt_minion_run:
  service.running:
    - name: {{ salt_minion_name }}
    - enable: True
    - order: last

{% endif %}

{# ssh-push minions have no running minion service; just rotate the ssh log. #}
{%- if salt['pillar.get']('contact_method') in ['ssh-push', 'ssh-push-tunnel'] %}
logrotate_configuration:
  file.managed:
    - name: /etc/logrotate.d/salt-ssh
    - user: root
    - group: root
    - mode: 644
    - makedirs: True
    - contents: |
        /var/log/salt-ssh.log {
                su root root
                missingok
                size 10M
                rotate 7
                compress
                notifempty
        }
{% endif %}

{# ensure /etc/sysconfig/rhn/systemid is created to indicate minion is managed by SUSE Manager #}
/etc/sysconfig/rhn/systemid:
  file.managed:
    - mode: 0640
    - makedirs: True
    - replace: False
07070100000073000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000002300000000susemanager-sls/salt/srvmonitoring    07070100000074000081B400000000000000000000000163F87E30000004C8000000000000000000000000000000000000002F00000000susemanager-sls/salt/srvmonitoring/disable.sls    node_exporter_service:
  service.dead:
    # Stop/disable the node exporter (state id on the line above).
    - name: prometheus-node_exporter
    - enable: False

postgres_exporter_service:
  service.dead:
    - name: prometheus-postgres_exporter
    - enable: False

# Strip the JMX exporter javaagent properties from tomcat and taskomatic
# (the included snippet reads the remove_jmx_props mapping).
{% set remove_jmx_props = {'service': 'tomcat', 'file': '/etc/sysconfig/tomcat'} %}
{% include 'srvmonitoring/removejmxprops.sls' %}

jmx_tomcat_config:
  file.absent:
    - name: /usr/lib/systemd/system/tomcat.service.d/jmx.conf
  mgrcompat.module_run:
    - name: service.systemctl_reload

{% set remove_jmx_props = {'service': 'taskomatic', 'file': '/etc/rhn/taskomatic.conf'} %}
{%- include 'srvmonitoring/removejmxprops.sls' %}

jmx_taskomatic_config:
  file.absent:
    - name: /usr/lib/systemd/system/taskomatic.service.d/jmx.conf
  mgrcompat.module_run:
    - name: service.systemctl_reload

# Despite the id (kept for symmetry with enable.sls), this sets the flag to 0,
# i.e. turns self monitoring OFF: update the setting in place if present,
# append it otherwise.
mgr_enable_prometheus_self_monitoring:
  cmd.run:
    - name: grep -q '^prometheus_monitoring_enabled.*=.*' /etc/rhn/rhn.conf && sed -i 's/^prometheus_monitoring_enabled.*/prometheus_monitoring_enabled = 0/' /etc/rhn/rhn.conf || echo 'prometheus_monitoring_enabled = 0' >> /etc/rhn/rhn.conf

# Verification step: fail the state run if the flag did not end up disabled.
mgr_is_prometheus_self_monitoring_disabled:
  cmd.run:
    - name: grep -qF 'prometheus_monitoring_enabled = 0' /etc/rhn/rhn.conf
07070100000075000081B400000000000000000000000163F87E3000000F6D000000000000000000000000000000000000002E00000000susemanager-sls/salt/srvmonitoring/enable.sls node_exporter:
  cmd.run:
    # Fail early if the node exporter package is missing (state id above).
    - name: /usr/bin/rpm --query --info golang-github-prometheus-node_exporter

node_exporter_service:
  service.running:
    - name: prometheus-node_exporter
    - enable: True
    - require:
      - cmd: node_exporter

# The postgres exporter is only configured when all DB connection pillars
# are present and non-empty.
{% set global = namespace(has_pillar_data = True) %}
{% for key in ['db_name', 'db_host', 'db_port', 'db_user', 'db_pass'] if global.has_pillar_data %}
  {% set global.has_pillar_data = key in pillar and pillar[key] %}
{% endfor %}

{% if global.has_pillar_data %}
postgres_exporter:
  cmd.run:
    # Accept either the current or the legacy postgres exporter package name.
    - name: /usr/bin/rpm --query --info prometheus-postgres_exporter || /usr/bin/rpm --query --info golang-github-wrouesnel-postgres_exporter

postgres_exporter_configuration:
  file.managed:
    - name: /etc/postgres_exporter/postgres_exporter_queries.yaml
    - makedirs: True
    - source:
      - salt://srvmonitoring/postgres_exporter_queries.yaml
    - user: root
    - group: root
    - mode: 644

postgres_exporter_service:
  file.managed:
    - name: /etc/sysconfig/prometheus-postgres_exporter
    - source: salt://srvmonitoring/prometheus-postgres_exporter
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - require:
      - cmd: postgres_exporter
      - file: postgres_exporter_configuration
  service.running:
    - name: prometheus-postgres_exporter
    - enable: True
    - require:
      - file: postgres_exporter_service
    - watch:
      - file: postgres_exporter_configuration
{% endif %}

jmx_exporter:
  cmd.run:
    - name: /usr/bin/rpm --query --info prometheus-jmx_exporter

# The javaagent replaces the old standalone jmx_exporter setup: remove the
# legacy properties/services and install the agent config snippets instead.
{% set remove_jmx_props = {'service': 'tomcat', 'file': '/etc/sysconfig/tomcat'} %}
{% include 'srvmonitoring/removejmxprops.sls' %}

jmx_exporter_tomcat_yaml_config:
  file.managed:
    - name: /etc/prometheus-jmx_exporter/tomcat/java_agent.yml
    - makedirs: True
    - user: root
    - group: root
    - mode: 644
    - source:
      - salt://srvmonitoring/java_agent.yaml

jmx_tomcat_config:
  file.managed:
    - name: /usr/lib/systemd/system/tomcat.service.d/jmx.conf
    - makedirs: True
    - user: root
    - group: root
    - mode: 644
    - source:
      - salt://srvmonitoring/tomcat_jmx.conf
    - require:
      - cmd: jmx_exporter
  mgrcompat.module_run:
    - name: service.systemctl_reload

jmx_exporter_tomcat_service_cleanup:
  service.dead:
    - name: prometheus-jmx_exporter@tomcat
    - enable: False

jmx_exporter_taskomatic_systemd_config_cleanup:
  file.absent:
    - name: /etc/prometheus-jmx_exporter/taskomatic/environment

{% set remove_jmx_props = {'service': 'taskomatic', 'file': '/etc/rhn/taskomatic.conf'} %}
{%- include 'srvmonitoring/removejmxprops.sls' %}

jmx_exporter_taskomatic_yaml_config_cleanup:
  file.absent:
    - name: /etc/prometheus-jmx_exporter/taskomatic/prometheus-jmx_exporter.yml

jmx_exporter_taskomatic_yaml_config:
  file.managed:
    - name: /etc/prometheus-jmx_exporter/taskomatic/java_agent.yml
    - makedirs: True
    - user: root
    - group: root
    - mode: 644
    - source:
      - salt://srvmonitoring/java_agent.yaml

jmx_taskomatic_config:
  file.managed:
    - name: /usr/lib/systemd/system/taskomatic.service.d/jmx.conf
    - makedirs: True
    - user: root
    - group: root
    - mode: 644
    - source:
      - salt://srvmonitoring/taskomatic_jmx.conf
    - require:
      - cmd: jmx_exporter
  mgrcompat.module_run:
    - name: service.systemctl_reload

jmx_exporter_taskomatic_service_cleanup:
  service.dead:
    - name: prometheus-jmx_exporter@taskomatic
    - enable: False

# Turn the self-monitoring flag ON: update the setting in place if present,
# append it otherwise.
mgr_enable_prometheus_self_monitoring:
  cmd.run:
    - name: grep -q '^prometheus_monitoring_enabled.*=.*' /etc/rhn/rhn.conf && sed -i 's/^prometheus_monitoring_enabled.*/prometheus_monitoring_enabled = 1/' /etc/rhn/rhn.conf || echo 'prometheus_monitoring_enabled = 1' >> /etc/rhn/rhn.conf

# Verification step: fail the state run if the flag did not end up enabled.
mgr_is_prometheus_self_monitoring_enabled:
  cmd.run:
    - name: grep -qF 'prometheus_monitoring_enabled = 1' /etc/rhn/rhn.conf
   07070100000076000081B400000000000000000000000163F87E300000008C000000000000000000000000000000000000003300000000susemanager-sls/salt/srvmonitoring/java_agent.yaml    whitelistObjectNames:
  - java.lang:type=Threading,*
  - java.lang:type=Memory,*
  - Catalina:type=ThreadPool,name=*
rules:
- pattern: ".*"
07070100000077000081B400000000000000000000000163F87E300000044E000000000000000000000000000000000000004200000000susemanager-sls/salt/srvmonitoring/postgres_exporter_queries.yaml mgr_serveractions:
  query: |
    SELECT (
      SELECT COUNT(*)
        FROM rhnServerAction
        WHERE status = (
          SELECT id FROM rhnActionStatus WHERE name = 'Queued'
       )
    ) AS queued,
    (
      SELECT COUNT(*)
        FROM rhnServerAction
        WHERE status = (
          SELECT id FROM rhnActionStatus WHERE name = 'Picked Up'
       )
    ) AS picked_up,
    (
      SELECT COUNT(*)
        FROM rhnServerAction
        WHERE status = (
          SELECT id FROM rhnActionStatus WHERE name IN ('Completed')
       )
    ) AS completed,
    (
      SELECT COUNT(*)
        FROM rhnServerAction
        WHERE status = (
          SELECT id FROM rhnActionStatus WHERE name IN ('Failed')
       )
    ) AS failed;
  metrics:
    - queued:
        usage: "GAUGE"
        description: "Count of queued Actions"
    - picked_up:
        usage: "GAUGE"
        description: "Count of picked up Actions"
    - completed:
        usage: "COUNTER"
        description: "Count of completed Actions"
    - failed:
        usage: "COUNTER"
        description: "Count of failed Actions"
  07070100000078000081B400000000000000000000000163F87E3000000324000000000000000000000000000000000000004000000000susemanager-sls/salt/srvmonitoring/prometheus-postgres_exporter   ## Path:           Applications/PostgreSQLExporter
## Description:    Prometheus exporter for PostgreSQL
## Type:           string()
## Default:        "postgresql://user:passwd@localhost:5432/database?sslmode=disable"
## ServiceRestart: postgres-exporter
#
# Connection URL to postgresql instance
#
DATA_SOURCE_NAME="postgresql://{{ pillar['db_user'] }}:{{ pillar['db_pass'] }}@{{ pillar['db_host'] }}:{{ pillar['db_port'] }}/{{ pillar['db_name'] }}?sslmode=disable"

## Path:           Applications/PostgreSQLExporter
## Description:    Prometheus exporter for PostgreSQL
## Type:           string()
## Default:        ""
## ServiceRestart: postgres-exporter
#
# Extra options for postgres-exporter
#
POSTGRES_EXPORTER_PARAMS="--extend.query-path /etc/postgres_exporter/postgres_exporter_queries.yaml"
07070100000079000081B400000000000000000000000163F87E300000059B000000000000000000000000000000000000003600000000susemanager-sls/salt/srvmonitoring/removejmxprops.sls remove_{{remove_jmx_props.service}}_jmx_host:
  cmd.run:
    - name: sed -ri 's/JAVA_OPTS="(.*)-Dcom\.sun\.management\.jmxremote\.host=\S*(.*)"/JAVA_OPTS="\1 \2"/' {{remove_jmx_props.file}}
    - onlyif: grep -F -- '-Dcom.sun.management.jmxremote.host=' {{remove_jmx_props.file}}

remove_{{remove_jmx_props.service}}_jmx_port:
  cmd.run:
    - name: sed -ri 's/JAVA_OPTS="(.*)-Dcom\.sun\.management\.jmxremote\.port=[0-9]*(.*)"/JAVA_OPTS="\1 \2"/' {{remove_jmx_props.file}}
    - onlyif: grep -E -- '-Dcom\.sun\.management\.jmxremote\.port=[0-9]+' {{remove_jmx_props.file}}

remove_{{remove_jmx_props.service}}_jmx_ssl:
  cmd.run:
    - name: sed -i 's/JAVA_OPTS="\(.*\)-Dcom\.sun\.management\.jmxremote\.ssl=false\(.*\)"/JAVA_OPTS="\1 \2"/' {{remove_jmx_props.file}}
    - onlyif: grep -F -- '-Dcom.sun.management.jmxremote.ssl=false' {{remove_jmx_props.file}}

remove_{{remove_jmx_props.service}}_jmx_auth:
  cmd.run:
    - name: sed -i 's/JAVA_OPTS="\(.*\)-Dcom\.sun\.management\.jmxremote\.authenticate=false\(.*\)"/JAVA_OPTS="\1 \2"/' {{remove_jmx_props.file}}
    - onlyif: grep -F -- '-Dcom.sun.management.jmxremote.authenticate=false' {{remove_jmx_props.file}}

remove_{{remove_jmx_props.service}}_jmx_hostname:
  cmd.run:
    - name: sed -ri 's/JAVA_OPTS="(.*)-Djava\.rmi\.server\.hostname=\S*(.*)"/JAVA_OPTS="\1 \2"/' {{remove_jmx_props.file}}
    - onlyif: grep -F -- '-Djava.rmi.server.hostname=' {{remove_jmx_props.file}}
 0707010000007A000081B400000000000000000000000163F87E3000000335000000000000000000000000000000000000002E00000000susemanager-sls/salt/srvmonitoring/status.sls node_exporter_service:
  mgrcompat.module_run:
    - name: service.status
    - m_name: "prometheus-node_exporter.service"

postgres_exporter_service:
  mgrcompat.module_run:
    - name: service.status
    - m_name: "prometheus-postgres_exporter.service"

jmx_tomcat_java_config:
  mgrcompat.module_run:
    - name: file.search
    - path: /usr/lib/systemd/system/tomcat.service.d/jmx.conf
    - pattern: "jmx_prometheus_javaagent.jar=5556"

jmx_taskomatic_java_config:
  mgrcompat.module_run:
    - name: file.search
    - path: /usr/lib/systemd/system/taskomatic.service.d/jmx.conf
    - pattern: "jmx_prometheus_javaagent.jar=5557"

mgr_is_prometheus_self_monitoring_enabled:
  cmd.run:
    - name: grep -q -E 'prometheus_monitoring_enabled\s*=\s*(1|y|true|yes|on)\s*$' /etc/rhn/rhn.conf

include:
  - util.syncstates
   0707010000007B000081B400000000000000000000000163F87E3000000097000000000000000000000000000000000000003700000000susemanager-sls/salt/srvmonitoring/taskomatic_jmx.conf    [Service]
Environment="JAVA_AGENT=-javaagent:/usr/share/java/jmx_prometheus_javaagent.jar=5557:/etc/prometheus-jmx_exporter/taskomatic/java_agent.yml"
 0707010000007C000081B400000000000000000000000163F87E3000000096000000000000000000000000000000000000003300000000susemanager-sls/salt/srvmonitoring/tomcat_jmx.conf    [Service]
Environment="CATALINA_OPTS=-javaagent:/usr/share/java/jmx_prometheus_javaagent.jar=5556:/etc/prometheus-jmx_exporter/tomcat/java_agent.yml"
  0707010000007D000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000002300000000susemanager-sls/salt/ssh_bootstrap    0707010000007E000081B400000000000000000000000163F87E30000007BC000000000000000000000000000000000000002C00000000susemanager-sls/salt/ssh_bootstrap/init.sls   {%- set mgr_sudo_user = salt['pillar.get']('mgr_sudo_user') or 'root' %}

mgr_ssh_identity:
  ssh_auth.present:
    - user: {{ mgr_sudo_user }}
    - source: salt://salt_ssh/mgr_ssh_id.pub
{% if salt['pillar.get']('contact_method') == 'ssh-push-tunnel' %}
mgr_server_localhost_alias_present:
  host.present:
{% else %}
mgr_server_localhost_alias_absent:
  host.absent:
{% endif %}
    - ip:
      - 127.0.0.1
    - names:
      - {{ salt['pillar.get']('mgr_server') }}

{%- if salt['pillar.get']('proxy_pub_key') and salt['pillar.get']('contact_method') == 'ssh-push-tunnel' %}
no_push_key_authorized:
  ssh_auth.absent:
    - user: {{ mgr_sudo_user }}
    - comment: susemanager-ssh-push

proxy_ssh_identity:
  ssh_auth.present:
    - user: {{ mgr_sudo_user }}
    - source: salt://salt_ssh/{{ salt['pillar.get']('proxy_pub_key') }}
    - require:
      - ssh_auth: no_push_key_authorized
{%- endif %}

{%- set home = salt['user.info'](mgr_sudo_user)['home'] %}

generate_own_ssh_key:
  cmd.run:
    - name: ssh-keygen -N '' -C 'susemanager-own-ssh-push' -f {{ home }}/.ssh/mgr_own_id -t rsa -q
    - creates: {{ home }}/.ssh/mgr_own_id.pub

ownership_own_ssh_key:
  file.managed:
    - name: {{ home }}/.ssh/mgr_own_id
    - user: {{ mgr_sudo_user }}
    - replace: False
    - require:
      - cmd: generate_own_ssh_key

ownership_own_ssh_pub_key:
  file.managed:
    - name: {{ home }}/.ssh/mgr_own_id.pub
    - user: {{ mgr_sudo_user }}
    - replace: False
    - require:
      - cmd: generate_own_ssh_key

no_own_key_authorized:
  ssh_auth.absent:
    - user: {{ mgr_sudo_user }}
    - comment: susemanager-own-ssh-push
    - require:
      - file: ownership_own_ssh_key

authorize_own_key:
  ssh_auth.present:
    - user: {{ mgr_sudo_user }}
    - source: {{ home }}/.ssh/mgr_own_id.pub
    - require:
      - file: ownership_own_ssh_key
      - ssh_auth: no_own_key_authorized

{% include 'channels/gpg-keys.sls' %}
{% include 'bootstrap/remove_traditional_stack.sls' %}
0707010000007F000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001A00000000susemanager-sls/salt/util 07070100000080000081B400000000000000000000000163F87E30000002D6000000000000000000000000000000000000003600000000susemanager-sls/salt/util/mgr_disable_fqdns_grain.sls {%- set salt_minion_name = 'salt-minion' %}
{%- set susemanager_minion_config = '/etc/salt/minion.d/susemanager.conf' %}
{# Prefer venv-salt-minion if installed #}
{%- if salt['pkg.version']('venv-salt-minion') %}
{%- set salt_minion_name = 'venv-salt-minion' %}
{%- set susemanager_minion_config = '/etc/venv-salt-minion/minion.d/susemanager.conf' %}
{%- endif -%}
mgr_disable_fqdns_grains:
  file.append:
    - name: {{ susemanager_minion_config }}
    - text: "enable_fqdns_grains: False"
    - unless: grep 'enable_fqdns_grains:' /etc/salt/minion.d/susemanager.conf

mgr_salt_minion:
  service.running:
   - name: {{ salt_minion_name }}
   - enable: True
   - order: last
   - watch:
     - file: mgr_disable_fqdns_grains
  07070100000081000081B400000000000000000000000163F87E300000032E000000000000000000000000000000000000003700000000susemanager-sls/salt/util/mgr_mine_config_clean_up.sls    {%- if salt['pillar.get']('contact_method') not in ['ssh-push', 'ssh-push-tunnel'] %}
{%- set salt_minion_name = 'salt-minion' %}
{%- set susemanager_minion_config = '/etc/salt/minion.d/susemanager-mine.conf' %}
{# Prefer venv-salt-minion if installed #}
{%- if salt['pkg.version']('venv-salt-minion') %}
{%- set salt_minion_name = 'venv-salt-minion' %}
{%- set susemanager_minion_config = '/etc/venv-salt-minion/minion.d/susemanager-mine.conf' %}
{%- endif -%}
mgr_disable_mine:
  file.managed:
    - name: {{ susemanager_minion_config }}
    - contents: "mine_enabled: False"
    - unless: grep 'mine_enabled:' /etc/salt/minion.d/susemanager-mine.conf

mgr_salt_minion:
  service.running:
   - name: {{ salt_minion_name }}
   - enable: True
   - order: last
   - watch:
     - file: mgr_disable_mine
{% endif %}
  07070100000082000081B400000000000000000000000163F87E3000000206000000000000000000000000000000000000003500000000susemanager-sls/salt/util/mgr_start_event_grains.sls  {%- set susemanager_minion_config = '/etc/salt/minion.d/susemanager.conf' %}
{# Prefer venv-salt-minion if installed #}
{%- if salt['pkg.version']('venv-salt-minion') %}
{%- set susemanager_minion_config = '/etc/venv-salt-minion/minion.d/susemanager.conf' %}
{%- endif -%}
mgr_start_event_grains:
  file.append:
    - name: {{ susemanager_minion_config }}
    - text: |
        start_event_grains: [machine_id, saltboot_initrd, susemanager]
    - unless: grep 'start_event_grains:' /etc/salt/minion.d/susemanager.conf
  07070100000083000081B400000000000000000000000163F87E3000000E01000000000000000000000000000000000000003800000000susemanager-sls/salt/util/mgr_switch_to_venv_minion.sls   {%- set susemanager_conf='/etc/salt/minion.d/susemanager.conf' %}
{%- set venv_susemanager_conf='/etc/venv-salt-minion/minion.d/susemanager.conf' %}
{%- set managed_minion=salt['file.file_exists'](susemanager_conf) and
                       not salt['file.replace'](susemanager_conf, '^master: .*', 'master: ' + pillar['mgr_server'],
                                                dry_run=True, show_changes=False, ignore_if_missing=True) %}
{%- set venv_managed_minion=salt['file.file_exists'](venv_susemanager_conf) and
                            not salt['file.replace'](venv_susemanager_conf, '^master: .*', 'master: ' + pillar['mgr_server'],
                                                     dry_run=True, show_changes=False, ignore_if_missing=True) %}
{%- if managed_minion or venv_managed_minion %}
{%- set pkgs_installed = salt['pkg.info_installed']() %}
{%- set venv_minion_installed = pkgs_installed.get('venv-salt-minion', False) and True %}
{%- set venv_minion_available = venv_minion_installed or salt['pkg.latest_version']('venv-salt-minion') or False %}
{%- if venv_minion_available %}
mgr_venv_salt_minion_pkg:
  pkg.installed:
    - name: venv-salt-minion
    - onlyif:
      - ([ {{ venv_minion_installed }} = "False" ])

mgr_copy_salt_minion_id:
  file.copy:
    - name: /etc/venv-salt-minion/minion_id
    - source: /etc/salt/minion_id
    - require:
      - pkg: mgr_venv_salt_minion_pkg
    - onlyif:
      - test -f /etc/salt/minion_id

mgr_copy_salt_minion_configs:
  cmd.run:
    - name: cp -r /etc/salt/minion.d /etc/venv-salt-minion/
    - require:
      - pkg: mgr_venv_salt_minion_pkg
    - onlyif:
      - ([ {{ venv_managed_minion }} = "False" ])

mgr_copy_salt_minion_grains:
  file.copy:
    - name: /etc/venv-salt-minion/grains
    - source: /etc/salt/grains
    - require:
      - pkg: mgr_venv_salt_minion_pkg
    - onlyif:
      - test -f /etc/salt/grains

mgr_copy_salt_minion_keys:
  cmd.run:
    - name: cp -r /etc/salt/pki/minion/minion* /etc/venv-salt-minion/pki/minion/
    - require:
      - cmd: mgr_copy_salt_minion_configs
    - onlyif:
      - test -f /etc/salt/pki/minion/minion_master.pub
    - unless:
      - test -f /etc/venv-salt-minion/pki/minion/minion_master.pub

mgr_enable_venv_salt_minion:
  service.running:
    - name: venv-salt-minion
    - enable: True
    - require:
      - cmd: mgr_copy_salt_minion_keys

mgr_disable_salt_minion:
  service.dead:
    - name: salt-minion
    - enable: False
    - require:
      - service: mgr_enable_venv_salt_minion

{%- if salt['pillar.get']('mgr_purge_non_venv_salt') %}
mgr_purge_non_venv_salt_packages:
  pkg.purged:
    - pkgs:
      - salt
      - salt-common
      - salt-minion
      - python2-salt
      - python3-salt
    - require:
      - service: mgr_disable_salt_minion
{%- endif %}

{%- if salt['pillar.get']('mgr_purge_non_venv_salt_files') %}
mgr_purge_non_venv_salt_pki_dir:
  cmd.run:
    - name: rm -rf /etc/salt/minion* /etc/salt/pki/minion
    - onlyif:
      - test -d /etc/salt/pki/minion
    - require:
      - service: mgr_disable_salt_minion

mgr_purge_non_venv_salt_conf_dir:
  file.absent:
    - name: /etc/salt
    - unless:
      - find /etc/salt -type f -print -quit | grep -q .
    - require:
      - cmd: mgr_purge_non_venv_salt_pki_dir
{%- endif %}
{%- else %}
mgr_venv_salt_minion_unavailable:
  test.fail_without_changes:
    - comment: venv-salt-minion package is not available
{%- endif %}
{%- else %}
mgr_salt_minion_of_another_master:
  test.fail_without_changes:
    - comment: The salt-minion is managed by another master
{%- endif %}
   07070100000084000081B400000000000000000000000163F87E300000001B000000000000000000000000000000000000002300000000susemanager-sls/salt/util/noop.sls    mgr_do_nothing:
  test.nop
 07070100000085000081B400000000000000000000000163F87E30000000C0000000000000000000000000000000000000002A00000000susemanager-sls/salt/util/syncbeacons.sls sync_beacons:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
  saltutil.sync_beacons
{%- else %}
  mgrcompat.module_run:
    - name: saltutil.sync_beacons
{%- endif %}
07070100000086000081B400000000000000000000000163F87E300000005B000000000000000000000000000000000000002C00000000susemanager-sls/salt/util/synccustomall.sls   include:
  - util.syncmodules
  - util.syncstates
  - util.syncgrains
  - util.syncbeacons
 07070100000087000081B400000000000000000000000163F87E30000000D8000000000000000000000000000000000000002900000000susemanager-sls/salt/util/syncgrains.sls  sync_grains:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
  saltutil.sync_grains:
{%- else %}
  mgrcompat.module_run:
    - name: saltutil.sync_grains
{%- endif %}
    - reload_grains: true
07070100000088000081B400000000000000000000000163F87E30000000C0000000000000000000000000000000000000002A00000000susemanager-sls/salt/util/syncmodules.sls sync_modules:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
  saltutil.sync_modules
{%- else %}
  mgrcompat.module_run:
    - name: saltutil.sync_modules
{%- endif %}
07070100000089000081B400000000000000000000000163F87E30000000E5000000000000000000000000000000000000002900000000susemanager-sls/salt/util/syncstates.sls  sync_states:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
  saltutil.sync_states
{%- elif salt['saltutil.sync_states']() or True %}
  mgrcompat.module_run:
    - name: saltutil.sync_states
{%- endif %}

   0707010000008A000081B400000000000000000000000163F87E3000000192000000000000000000000000000000000000002900000000susemanager-sls/salt/util/systeminfo.sls  # Update system info on each minion start

include:
  - util.syncmodules
  - util.syncstates
  - util.syncgrains
  - util.syncbeacons
status_uptime:
  mgrcompat.module_run:
    - name: status.uptime
grains_update:
  mgrcompat.module_run:
    - name: grains.item
    - args:
      - kernelrelease
      - master

kernel_live_version:
  mgrcompat.module_run:
    - name: sumautil.get_kernel_live_version
  0707010000008B000081B400000000000000000000000163F87E300000016B000000000000000000000000000000000000002E00000000susemanager-sls/salt/util/systeminfo_full.sls # Collect full system info for minion registration

include:
  - util.syncmodules
  - util.syncstates
  - util.syncgrains
  - util.syncbeacons
status_uptime:
  mgrcompat.module_run:
    - name: status.uptime
grains_update:
  mgrcompat.module_run:
    - name: grains.items

kernel_live_version:
  mgrcompat.module_run:
    - name: sumautil.get_kernel_live_version
 0707010000008C000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001A00000000susemanager-sls/salt/virt 0707010000008D000081B400000000000000000000000163F87E3000000210000000000000000000000000000000000000003400000000susemanager-sls/salt/virt/cluster-vm-primitive.conf   primitive {{ name }} VirtualDomain \
    params config="{{ path }}/{{ name }}.xml" \
    migration_transport=ssh autoset_utilization_cpu=false autoset_utilization_hv_memory=false start_resources=true save_config_on_stop=true \
    op start timeout=90s interval=0 \
    op stop timeout=90s interval=0 \
    op monitor timeout=30s interval=10s \
    meta target-role=Started allow-migrate=true
{%- if cluster_fs is not none %}
order {{ cluster_fs }}-then-{{ name }} Mandatory: {{ cluster_fs }}:start {{ name }}:start
{%- endif %}
0707010000008E000081B400000000000000000000000163F87E3000000F34000000000000000000000000000000000000002800000000susemanager-sls/salt/virt/create-vm.sls   {% if pillar.get('boot', {}).get('kernel') -%}
kernel_cached:
  file.managed:
    - name: /tmp/virt/{{ pillar['name'] }}/kernel
    - source: salt://bootloader/{{ pillar['boot']['kernel'] }}
    - makedirs: True

initrd_cached:
  file.managed:
    - name: /tmp/virt/{{ pillar['name'] }}/initrd
    - source: salt://bootloader/{{ pillar['boot']['initrd'] }}
    - makedirs: True
{%- endif %}

{%- if 'interfaces' in pillar %}
nets-{{ pillar['name'] }}:
  virt_utils.network_running:
    - networks:
    {%- for nic in pillar['interfaces'] %}
      - {{ nic['source'] }}
    {%- endfor %}
{%- endif %}

{%- if 'disks' in pillar %}
pools-{{ pillar['name'] }}:
  virt_utils.pool_running:
    - pools:
  {%- for disk in pillar['disks'] %}
    {%- if 'pool' in disk %}
      - {{ disk['pool'] }}
    {%- endif %}
  {%- endfor %}
{%- endif %}

{% macro domain_params() -%}
    - name: {{ pillar['name'] }}
    - {{ salt.virt_utils.domain_parameters(pillar["vcpus"], pillar["mem"], pillar.get("template")) }}
    - os_type: {{ pillar['os_type'] }}
    - arch: {{ pillar['arch'] }}
    - vm_type: {{ pillar['vm_type'] }}
    - disks:
        {{ pillar['disks'] }}
{%- if 'interfaces' in pillar %}
    - interfaces:
        {{ pillar['interfaces'] }}
{%- endif %}
    - graphics:
        {{ pillar['graphics'] }}
{%- endmacro %}

{%- macro uefi() %}
  {%- for param, value in pillar.get("uefi", {}).items() %}
        {{ param }}: {{ value }}
  {%- endfor %}
{%- endmacro %}

{%- set cdrom_boot = pillar.get('boot_dev', 'hd').startswith('cdrom') -%}
{%- set ks_boot = pillar.get('boot', {}).get('kernel') is not none -%}
domain_first_boot_define:
  virt.running:
    {{ domain_params() }}
    - seed: False
    - boot_dev: {{ pillar.get('boot_dev', 'hd') }}
{%- if cdrom_boot or ks_boot %}
    - stop_on_reboot: True
{%- endif %}
{%- if ks_boot %}
    - boot:
        kernel: /tmp/virt/{{ pillar['name'] }}/kernel
        initrd: /tmp/virt/{{ pillar['name'] }}/initrd
        cmdline: {{ pillar['boot']['kopts'] }}
        {{ uefi() }}
    - require:
      - file: kernel_cached
      - file: initrd_cached
  {%- if 'interfaces' in pillar %}
      - virt_utils: nets-{{ pillar['name'] }}
  {%- endif %}
  {%- if 'disks' in pillar %}
      - virt_utils: pools-{{ pillar['name'] }}
  {%- endif %}
{%- elif "uefi" in pillar %}
    - boot:
        {{ uefi() }}
{%- endif %}

{%- if cdrom_boot or ks_boot %}
domain_define:
  virt.defined:
    {{ domain_params() }}
    - live: False
    - stop_on_reboot: False
  {%- if cdrom_boot %}
    - boot_dev: "hd"
  {%- endif %}
{%- if ks_boot %}
    - boot:
        kernel: null
        initrd: null
        cmdline: null
        {{ uefi() }}
{%- elif "uefi" in pillar %}
    - boot:
        {{ uefi() }}
{%- endif %}
    - require:
      - virt: domain_first_boot_define
{%- endif %}

{%- if pillar.get("cluster_definitions") %}

{{ pillar['cluster_definitions'] }}/{{ pillar['name'] }}.xml:
  mgrutils.cmd_dump:
    - cmd: 'virsh dumpxml --inactive {{ pillar['name'] }}'
    - require:
      - virt: {{ "domain_define" if cdrom_boot or ks_boot else "domain_first_boot_define" }}

/tmp/{{ pillar['name'] }}-primitive.conf:
  file.managed:
    - source: salt://virt/cluster-vm-primitive.conf
    - template: jinja
    - context:
        name: "{{ pillar['name'] }}"
        path: "{{ pillar['cluster_definitions'] }}"
        cluster_fs: {{ salt.virt_utils.get_cluster_filesystem(pillar["cluster_definitions"]) }}
    - require:
      - mgrutils: {{ pillar['cluster_definitions'] }}/{{ pillar['name'] }}.xml

define_primitive:
  cmd.run:
    - name: 'crm configure load update /tmp/{{ pillar['name'] }}-primitive.conf'
    - require:
      - file: /tmp/{{ pillar['name'] }}-primitive.conf

make_transient:
    mgrcompat.module_run:
        - name: virt.undefine
        - vm_: {{ pillar['name'] }}
        - require:
          - cmd: define_primitive

{%- endif %}
0707010000008F000081B400000000000000000000000163F87E300000024C000000000000000000000000000000000000002600000000susemanager-sls/salt/virt/deleted.sls {%- set vm_info = salt.virt_utils.vm_info(pillar['domain_name']) %}
{%- set cluster_id = vm_info[pillar['domain_name']].get('cluster_primitive') %}

{%- if cluster_id %}
{{ pillar['domain_name'] }}:
  virt_utils.cluster_vm_removed:
    - primitive: {{ cluster_id }}
    - definition_path: {{ vm_info[pillar['domain_name']]['definition_path'] }}

{%- else %}
vm_stopped:
  virt.powered_off:
    - name: {{ pillar['domain_name'] }}

mgr_virt_destroy:
  mgrcompat.module_run:
    - name: virt.purge
    - vm_: {{ pillar['domain_name'] }}
    - require:
      - virt: vm_stopped
{%- endif %}
07070100000090000081B400000000000000000000000163F87E300000020E000000000000000000000000000000000000002C00000000susemanager-sls/salt/virt/engine-events.sls   {% if pillar['virt_entitled'] %}
{% set minion_config_dir = salt["config.get"]("config_dir") %}
{{  minion_config_dir }}/minion.d/libvirt-events.conf:
  file.managed:
    - contents: |
        engines:
          - libvirt_events:
              filters:
                - domain/lifecycle
                - pool/lifecycle
                - pool/refresh
                - network/lifecycle

/var/cache/virt_state.cache:
  file.absent

{% else %}

{{ minion_config_dir }}/minion.d/libvirt-events.conf:
  file.absent

{% endif %}
  07070100000091000081B400000000000000000000000163F87E300000004E000000000000000000000000000000000000002C00000000susemanager-sls/salt/virt/guest-migrate.sls   crm resource move {{ pillar['primitive'] }} {{ pillar['target'] }}:
  cmd.run
  07070100000092000081B400000000000000000000000163F87E300000033B000000000000000000000000000000000000002D00000000susemanager-sls/salt/virt/network-create.sls  {% set active = salt.virt.network_info(pillar['network_name']).get(pillar['network_name'], {}).get('active') %}
{% set state = 'running' if active else pillar['action_type'] %}
{% macro optional(name) -%}
  {%- if pillar[name]|default(none) %}
    - {{ name }}: {{ pillar[name] }}
  {%- endif %}
{%- endmacro %}

{% set  optional_props = ['bridge', 'mtu', 'domain', 'physical_function', 'addresses',
                          'interfaces', 'tag', 'vport', 'nat', 'ipv4_config', 'ipv6_config', 'dns'] -%}
network_{{ state }}:
  virt.network_{{ state }}:
    - name: {{ pillar['network_name'] }}
    - forward: {{ pillar['forward']|default('null') }}
    - bridge: {{ pillar['bridge']|default('null') }}
    - autostart: {{ pillar['autostart'] }}
{%- for property in optional_props -%}
    {{ optional(property) }}
{%- endfor %}
 07070100000093000081B400000000000000000000000163F87E30000002D1000000000000000000000000000000000000003200000000susemanager-sls/salt/virt/network-statechange.sls {% if pillar['network_state'] != 'delete' %}
mgr_network_{{ pillar['network_state'] }}:
  mgrcompat.module_run:
    - name: virt.network_{{ pillar['network_state'] }}
    - m_name: {{ pillar['network_name'] }}

{% else %}
  {%- set net_info = salt.virt.network_info(pillar['network_name'])[pillar['network_name']] %}
  {%- if net_info["active"] == 1 %}
mgr_network_stop:
  mgrcompat.module_run:
    - name: virt.network_stop
    - m_name: {{ pillar['network_name'] }}
  {%- endif %}

mgr_network_delete:
  mgrcompat.module_run:
    - name: virt.network_undefine
    - m_name: {{ pillar['network_name'] }}
  {%- if net_info["active"] == 1 %}
    - require:
        - mgrcompat: mgr_network_stop
  {%- endif %}
{% endif %}
   07070100000094000081B400000000000000000000000163F87E30000015E4000000000000000000000000000000000000002A00000000susemanager-sls/salt/virt/pool-create.sls {% set pool_state = salt.virt.pool_info(pillar['pool_name']).get(pillar['pool_name'], {}).get('state') %}
{% set state = 'running' if pool_state == 'running' else pillar['action_type'] %}

pool_{{ state }}:
  virt.pool_{{ state }}:
    - name: {{ pillar['pool_name'] }}
    - ptype: {{ pillar['pool_type'] }}
    {% if pillar['target']|default(none) %}
    - target: {{ pillar['target'] }}
    {% endif %}
    - autostart: {{ pillar['autostart'] }}
    {% if pillar['permissions']|default(none) %}
    - permissions:
      {% if pillar['permissions']['mode']|default(none) %}
        mode: {{ pillar['permissions']['mode'] }}
      {% endif %}
      {% if pillar['permissions']['owner']|default(none) %}
        owner: {{ pillar['permissions']['owner'] }}
      {% endif %}
      {% if pillar['permissions']['group']|default(none) %}
        group: {{ pillar['permissions']['group'] }}
      {% endif %}
      {% if pillar['permissions']['label']|default(none) %}
        label: {{ pillar['permissions']['label'] }}
      {% endif %}
    {% endif %}  {# pillar['permissions']['mode']|default(none) #}
    {% if pillar['source']|default(none) %}
    - source:
      {% if pillar['source']['dir']|default(none) %}
        dir: {{ pillar['source']['dir'] }}
      {% endif %}
      {% if pillar['source']['name']|default(none) %}
        name: {{ pillar['source']['name'] }}
      {% endif %}
      {% if pillar['source']['format']|default(none) %}
        format: {{ pillar['source']['format'] }}
      {% endif %}
      {% if pillar['source']['initiator']|default(none) %}
        initiator: {{ pillar['source']['initiator'] }}
      {% endif %}
      {% if pillar['source']['hosts']|default(none) %}
        hosts:
        {% for host in pillar['source']['hosts'] %}
          - {{ host }}
        {% endfor %}
      {% endif %}  {# pillar['source']['hosts']|default(none) #}
      {% if pillar['source']['auth']|default(none) %}
        auth:
          username: {{ pillar['source']['auth']['username'] }}
          password: {{ pillar['source']['auth']['password'] }}
      {% endif %}  {# pillar['source']['auth']|default(none) #}
      {% if pillar['source']['devices']|default(none) %}
        devices:
        {% for device in pillar['source']['devices'] %}
          - path: {{ device['path'] }}
          {% if device['part_separator']|default(none) %}
            part_separator: {{ device['part_separator'] }}
          {% endif %}
        {% endfor %}
      {% endif %}  {# pillar['source']['devices']|default(none) #}
      {% if pillar['source']['adapter']|default(none) %}
        adapter:
        {% if pillar['source']['adapter']['type']|default(none) %}
          type: {{ pillar['source']['adapter']['type'] }}
        {% endif %}
        {% if pillar['source']['adapter']['name']|default(none) %}
          name: {{ pillar['source']['adapter']['name'] }}
        {% endif %}
        {% if pillar['source']['adapter']['parent']|default(none) %}
          parent: {{ pillar['source']['adapter']['parent'] }}
        {% endif %}
        {% if pillar['source']['adapter']['managed']|default(none) %}
          managed: {{ pillar['source']['adapter']['managed'] }}
        {% endif %}
        {% if pillar['source']['adapter']['parent_wwnn']|default(none) %}
          parent_wwnn: {{ pillar['source']['adapter']['parent_wwnn'] }}
        {% endif %}
        {% if pillar['source']['adapter']['parent_wwpn']|default(none) %}
          parent_wwpn: {{ pillar['source']['adapter']['parent_wwpn'] }}
        {% endif %}
        {% if pillar['source']['adapter']['parent_fabric_wwn']|default(none) %}
          parent_fabric_wwn: {{ pillar['source']['adapter']['parent_fabric_wwn'] }}
        {% endif %}
        {% if pillar['source']['adapter']['wwnn']|default(none) %}
          wwnn: {{ pillar['source']['adapter']['wwnn'] }}
        {% endif %}
        {% if pillar['source']['adapter']['wwpn']|default(none) %}
          wwpn: {{ pillar['source']['adapter']['wwpn'] }}
        {% endif %}
        {% if pillar['source']['adapter']['parent_address']|default(none) %}
          parent_address:
          {% if pillar['source']['adapter']['parent_address']['unique_id']|default(none) %}
            unique_id: {{ pillar['source']['adapter']['parent_address']['unique_id'] }}
          {% endif %}
          {% if pillar['source']['adapter']['parent_address']['address']|default(none) %}
            address:
            {% if pillar['source']['adapter']['parent_address']['address']['domain']|default(none) %}
              domain: {{ pillar['source']['adapter']['parent_address']['address']['domain'] }}
            {% endif %}
            {% if pillar['source']['adapter']['parent_address']['address']['bus']|default(none) %}
              bus: {{ pillar['source']['adapter']['parent_address']['address']['bus'] }}
            {% endif %}
            {% if pillar['source']['adapter']['parent_address']['address']['slot']|default(none) %}
              slot: {{ pillar['source']['adapter']['parent_address']['address']['slot'] }}
            {% endif %}
            {% if pillar['source']['adapter']['parent_address']['address']['function']|default(none) %}
              function: {{ pillar['source']['adapter']['parent_address']['address']['function'] }}
            {% endif %}
          {% endif %}  {# pillar['source']['adapter']['parent_address']['address']|default(none) #}
        {% endif %}  {# pillar['source']['adapter']['parent_address']|default(none) #}
      {% endif %}  {# pillar['source']['adapter']|default(none) #}
    {% endif %}  {# pillar['source']|default(none) #}
07070100000095000081B400000000000000000000000163F87E3000000075000000000000000000000000000000000000002B00000000susemanager-sls/salt/virt/pool-deleted.sls    mgr_pool_deleted:
  virt.pool_deleted:
    - name: {{ pillar['pool_name'] }}
    - purge: {{ pillar['pool_purge'] }}
   07070100000096000081B400000000000000000000000163F87E3000000072000000000000000000000000000000000000002D00000000susemanager-sls/salt/virt/pool-refreshed.sls  mgr_pool_refreshed:
  mgrcompat.module_run:
    - name: virt.pool_refresh
    - m_name: {{ pillar['pool_name'] }}
  07070100000097000081B400000000000000000000000163F87E3000000096000000000000000000000000000000000000002F00000000susemanager-sls/salt/virt/pool-statechange.sls    mgr_pool_{{ pillar['pool_state'] }}:
  mgrcompat.module_run:
    - name: virt.pool_{{ pillar['pool_state'] }}
    - m_name: {{ pillar['pool_name'] }}
  07070100000098000081B400000000000000000000000163F87E30000000B6000000000000000000000000000000000000002400000000susemanager-sls/salt/virt/reset.sls   powered_off:
  virt.powered_off:
    - name: {{ pillar['domain_name'] }}

restarted:
  virt.running:
    - name: {{ pillar['domain_name'] }}
    - require:
      - virt: powered_off
  07070100000099000081B400000000000000000000000163F87E3000000068000000000000000000000000000000000000002600000000susemanager-sls/salt/virt/resumed.sls mgr_virt_resume:
  mgrcompat.module_run:
    - name: virt.resume
    - vm_: {{ pillar['domain_name'] }}
0707010000009A000081B400000000000000000000000163F87E30000000A9000000000000000000000000000000000000002500000000susemanager-sls/salt/virt/setmem.sls  mgr_virt_mem:
  mgrcompat.module_run:
    - name: virt.setmem
    - vm_: {{ pillar['domain_name'] }}
    - memory: {{ pillar['domain_mem'] // 1024 }}
    - config: True
   0707010000009B000081B400000000000000000000000163F87E30000000A6000000000000000000000000000000000000002700000000susemanager-sls/salt/virt/setvcpus.sls    mgr_virt_vcpus:
  mgrcompat.module_run:
    - name: virt.setvcpus
    - vm_: {{ pillar['domain_name'] }}
    - vcpus: {{ pillar['domain_vcpus'] }}
    - config: True
  0707010000009C000081B400000000000000000000000163F87E3000000296000000000000000000000000000000000000002A00000000susemanager-sls/salt/virt/statechange.sls {%- set vm_info = salt.virt_utils.vm_info(pillar['domain_name']) %}
{%- set cluster_id = vm_info[pillar['domain_name']].get('cluster_primitive') %}
{%- set crm_action = {
  'running': 'start',
  'stopped': 'stop',
}.get(pillar['domain_state']) %}
{%- if cluster_id and crm_action %}
crm resource {{ crm_action }} {{ cluster_id }}:
  cmd.run
{%- else %}
{{ pillar['domain_name'] }}:
  {%- if pillar['domain_state'] == 'running' %}
  virt_utils.vm_resources_running:
    - name: {{ pillar['domain_name'] }}
  virt.running:
    - require:
      - virt_utils: {{ pillar['domain_name'] }}
  {%- else %}
  virt.{{ pillar['domain_state'] }}
  {%- endif %}
{%- endif %}
  0707010000009D000081B400000000000000000000000163F87E3000000068000000000000000000000000000000000000002800000000susemanager-sls/salt/virt/suspended.sls   mgr_virt_suspend:
  mgrcompat.module_run:
    - name: virt.pause
    - vm_: {{ pillar['domain_name'] }}
0707010000009E000081B400000000000000000000000163F87E3000000918000000000000000000000000000000000000002800000000susemanager-sls/salt/virt/update-vm.sls   {%- set vm_info = salt.virt_utils.vm_info(pillar['name']) %}
{%- set cluster_id = vm_info[pillar['name']].get('cluster_primitive') %}

{%- if cluster_id %}
temporary_define:
    mgrcompat.module_run:
        - name: virt.define_xml_path
        - path: {{ vm_info[pillar['name']]['definition_path'] }}
{%- endif %}

domain_update:
    mgrcompat.module_run:
        - name: virt.update
        - m_name: {{ pillar['name'] }}
        - cpu: {{ pillar['vcpus'] }}
        - mem: {{ pillar['mem'] }}
{% if 'disks' in pillar %}
        - disks:
    {% for disk in pillar['disks'] %}
            - name: {{ disk['name'] }}
              model: {{ disk['model'] }}
        {% if 'device' in disk %}
              device: {{ disk['device'] }}
        {% endif %}
        {% if 'type' in disk %}
              type: {{ disk['type'] }}
        {% endif %}
        {% if 'format' in disk %}
              format: {{ disk['format'] }}
        {% endif %}
        {% if 'source_file' in disk %}
              source_file: {{ disk['source_file'] if disk['source_file'] != '' else 'null' }}
        {% endif %}
        {% if 'pool' in disk %}
              pool: {{ disk['pool'] }}
        {% endif %}
        {% if 'size' in disk %}
              size: {{ disk['size'] }}
        {% endif %}
        {% if 'image' in disk %}
              image: {{ disk['image'] }}
        {% endif %}
    {% endfor %}
{% endif %}
{% if 'interfaces' in pillar %}
        - interfaces:
    {% for nic in pillar['interfaces'] %}
            - name: {{ nic['name'] }}
              type: {{ nic['type'] }}
              source: {{ nic['source'] }}
        {% if 'mac' in nic %}
              mac: {{ nic['mac'] if nic['mac'] != '' else 'null' }}
        {% endif %}
    {% endfor %}
{% endif %}
        - graphics:
            type: {{ pillar['graphics']['type'] }}
{%- if cluster_id %}
        - require:
            - mgrcompat: temporary_define

{{ vm_info[pillar['name']]['definition_path'] }}:
    mgrutils.cmd_dump:
        - cmd: 'virsh dumpxml --inactive {{ pillar['name'] }}'
        - require:
            - mgrcompat: domain_update

temporary_undefine:
    mgrcompat.module_run:
        - name: virt.undefine
        - vm_: {{ pillar['name'] }}
        - require:
            - mgrutils: {{ vm_info[pillar['name']]['definition_path'] }}
{%- endif %}
0707010000009F000081B400000000000000000000000163F87E30000000F2000000000000000000000000000000000000002D00000000susemanager-sls/salt/virt/volume-deleted.sls  include:
  - virt.pool-refreshed

mgr_volume_deleted:
  mgrcompat.module_run:
    - name: virt.volume_delete
    - pool: {{ pillar['pool_name'] }}
    - volume: {{ pillar['volume_name'] }}
    - require_in:
        - sls: virt.pool-refreshed
  070701000000A0000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001500000000susemanager-sls/scap  070701000000A1000081B400000000000000000000000163F87E3000001532000000000000000000000000000000000000002A00000000susemanager-sls/scap/xccdf-resume.xslt.in <?xml version="1.0" encoding="UTF-8"?>
<!-- Copyright 2012 Red Hat Inc., Durham, North Carolina. All Rights Reserved.

This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the Free
Software Foundation; either version 2.1 of the License.

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for more
details.

You should have received a copy of the GNU Lesser General Public License along
with this library; if not, write to the Free Software Foundation, Inc., 59
Temple Place, Suite 330, Boston, MA  02111-1307 USA

Authors:
     Simon Lukasik <slukasik@redhat.com>
-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0"
    xmlns:cdf1="http://checklists.nist.gov/xccdf/1.1"
    xmlns:cdf2="http://checklists.nist.gov/xccdf/1.2">
    <xsl:output method="xml" encoding="UTF-8"/>

    <xsl:template match="/">
        <benchmark-resume>
            <xsl:apply-templates select="*[local-name()='Benchmark']"/>
        </benchmark-resume>
    </xsl:template>

    <xsl:template match="cdf1:Benchmark | cdf2:Benchmark">
        <xsl:copy-of select="@id"/>
        <xsl:attribute name="version">
            <xsl:value-of select="normalize-space(cdf1:version/text()|cdf2:version/text())"/>
        </xsl:attribute>

        <xsl:variable name="profileId" select="cdf1:TestResult[1]/cdf1:profile/@idref | cdf2:TestResult[1]/cdf2:profile/@idref"/>
        <xsl:choose>
            <xsl:when test="not($profileId)"/> <!-- Do not send profile element when scanning with 'default' profile. -->
            <xsl:when test="cdf1:Profile[@id = $profileId] | cdf2:Profile[@id = $profileId]">
                <xsl:apply-templates select="cdf1:Profile[@id = $profileId] | cdf2:Profile[@id = $profileId]"/>
            </xsl:when>
            <xsl:otherwise>
                <profile title="Tailored profile">
                    <xsl:attribute name="id">
                         <xsl:value-of select="$profileId"/>
                    </xsl:attribute>
                </profile>
            </xsl:otherwise>
        </xsl:choose>
        <xsl:apply-templates select="cdf1:TestResult[1] | cdf2:TestResult[1]"/>
    </xsl:template>

    <xsl:template match="cdf1:Profile | cdf2:Profile">
        <profile>
            <xsl:attribute name="title">
                <xsl:value-of select="normalize-space(cdf1:title/text() | cdf2:title/text())"/>
            </xsl:attribute>
            <xsl:copy-of select="@id"/>
            <xsl:attribute name="description">
                <xsl:value-of select="normalize-space(cdf1:description[@xml:lang='en-US']/text() | cdf2:description[@xml:lang='en-US']/text())"/>
            </xsl:attribute>
        </profile>
    </xsl:template>

    <xsl:template match="cdf1:TestResult | cdf2:TestResult">
        <TestResult>
            <xsl:copy-of select="@id"/>
            <xsl:copy-of select="@start-time"/>
            <xsl:copy-of select="@end-time"/>
            <pass>
                    <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'pass'] | cdf2:rule-result[cdf2:result = 'pass']"/>
            </pass>
            <fail>
                    <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'fail'] | cdf2:rule-result[cdf2:result = 'fail']"/>
            </fail>
            <error>
                    <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'error'] | cdf2:rule-result[cdf2:result = 'error']"/>
            </error>
            <unknown>
                    <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'unknown'] | cdf2:rule-result[cdf2:result = 'unknown']"/>
            </unknown>
            <notapplicable>
                    <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'notapplicable'] | cdf2:rule-result[cdf2:result = 'notapplicable']"/>
            </notapplicable>
            <notchecked>
                    <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'notchecked'] | cdf2:rule-result[cdf2:result = 'notchecked']"/>
            </notchecked>
            <notselected>
                    <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'notselected'] | cdf2:rule-result[cdf2:result = 'notselected']"/>
            </notselected>
            <informational>
                   <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'informational'] | cdf2:rule-result[cdf2:result = 'informational']"/>
            </informational>
            <fixed>
                    <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'fixed'] | cdf2:rule-result[cdf2:result = 'fixed']"/>
            </fixed>
        </TestResult>
    </xsl:template>

    <xsl:template match="cdf1:rule-result | cdf2:rule-result">
        <rr>
            <xsl:attribute name="id">
                <xsl:value-of select="normalize-space(@idref)"/>
            </xsl:attribute>
            <xsl:apply-templates select="cdf1:ident | cdf2:ident"/>
        </rr>
    </xsl:template>

    <xsl:template match="cdf1:ident | cdf2:ident">
        <ident>
            <xsl:copy-of select="@system"/>
            <xsl:value-of select="normalize-space(text())"/>
        </ident>
    </xsl:template>
</xsl:stylesheet>
  070701000000A2000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001400000000susemanager-sls/src   070701000000A3000081B400000000000000000000000163F87E300000021B000000000000000000000000000000000000001E00000000susemanager-sls/src/README.md ## Python Code Maintenance

Test are written with PyTest. This way:

1. Create your "test_foo.py" file.

2. Import with double-dot your package,
   so it will be included in the sys path, e.g.:

   from ..beacons import pkgset

3. Create a test function "def test_my_foo(..."

4. Rock-n-roll by simply calling "py.test".


Don't mind `.cache` and `__pycache__` directories,
they are ignored in an explicit `.gitignore`.

Have fun. :)

## Run Unit tests 

Use the following command to run unit test 
`make -f Makefile.python docker_pytest`
 070701000000A4000081B400000000000000000000000163F87E3000000000000000000000000000000000000000000000002000000000susemanager-sls/src/__init__.py   070701000000A5000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001C00000000susemanager-sls/src/beacons   070701000000A6000081B400000000000000000000000163F87E3000000000000000000000000000000000000000000000002800000000susemanager-sls/src/beacons/__init__.py   070701000000A7000081B400000000000000000000000163F87E30000009A8000000000000000000000000000000000000002600000000susemanager-sls/src/beacons/pkgset.py # -*- coding: utf-8 -*-
"""
Watch RPM or DPkg database via cookies and fire
an event to the SUSE Manager if that has been changed.

Author: Bo Maryniuk <bo@suse.de>
"""

from __future__ import absolute_import
import os

import salt.cache
import salt.config


__virtualname__ = "pkgset"

SALT_CONFIG_DIR = os.environ.get("SALT_CONFIG_DIR", "/etc/salt")

__opts__ = salt.config.minion_config(
    os.path.join(SALT_CONFIG_DIR, "minion")
)

CACHE = salt.cache.Cache(__opts__)

PKGSET_COOKIES = (
    os.path.join(__opts__["cachedir"], "rpmdb.cookie"),
    os.path.join(__opts__["cachedir"], "dpkg.cookie"),
)


def __virtual__():
    return __virtualname__


def validate(config):
    """
    The absence of this function could cause noisy logging,
    when logging level set to DEBUG or TRACE.
    So we need to have it with no any validation inside.
    """
    return True, "There is nothing to validate"


def beacon(config):
    """
    Watch the cookie file from package manager plugin.
    If its content changes, fire an event to the Master.

    Example Config

    .. code-block:: yaml

        beacons:
          pkgset:
            interval: 5

    """

    ret = []
    for cookie_path in PKGSET_COOKIES:
        if not os.path.exists(cookie_path):
            continue
        with open(cookie_path) as ck_file:
            ck_data = ck_file.read().strip()
            if __virtualname__ not in __context__:
                # After a minion restart, when this is running for first time, there is nothing in context yet
                # So, if there is any data in the cache, we put it in the context, if not we put the new data.
                # and update the data in the cache.
                cache_data = CACHE.fetch("beacon/pkgset", "cookie").get("data", None)
                if cache_data:
                    __context__[__virtualname__] = cache_data
                else:
                    __context__[__virtualname__] = ck_data
                    CACHE.store("beacon/pkgset", "cookie", {"data": ck_data})
            if __context__[__virtualname__] != ck_data:
                # Now it's time to fire beacon event only if the new data is not yet
                # inside the context (meaning not proceesed), and then stop iterating
                ret.append({"tag": "changed"})
                CACHE.store("beacon/pkgset", "cookie", {"data": ck_data})
                __context__[__virtualname__] = ck_data
                break

    return ret
070701000000A8000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001800000000susemanager-sls/src/doc   070701000000A9000081B400000000000000000000000163F87E30000008C4000000000000000000000000000000000000002200000000susemanager-sls/src/doc/README.md # Uyuni configuration modules (`uyuni_config`) documentation

These execution and state modules allow to configure organizations, users, user permissions on channels and system groups on an Uyuni or SUSE Manager Server.

## General pillar data configuration

Virtually all functions in the modules leverage the XMLRPC API. It is thus necessary to provide an Uyuni/SUSE Manager administrator user name and password, with permissions on the entities to configure.

It is possible and recommended to configure those credentials in a pillar file with the following structure:
```
uyuni:
  xmlrpc:
    user: admin
    password: admin
```

## Detailed function documentation

Individual methods, parameters and return values are documented in `uyuni_config_execution_module_doc.txt` and `uyuni_config_state_module_doc.txt` in the same directory that contains this file.

## Examples

A few examples are provided:

- `examples/uyuni_config_hardcode.sls`: shows how to define an organization, a trust, a system group and a user with channel permissions. Note: all credentials are hardcoded directly in the file for simplicity's sake, but should at least be moved to pillars in a production environment
- `examples/ldap/uyuni_users_ldap.sls`: shows how to define multiple users based on data coming from an LDAP server via the LDAP pillar module. This allows to implement syncing LDAP users to Uyuni/SUSE Manager

### LDAP example specifics

Configuration notes:
- see  "General pillar data configuration" above for general credential configuration in pillars
- one more pillar needs to be defined in which organization administrator credentials are specified for each organization the state is going to create users in. An example with one organization can be found in `examples/ldap/pillar_orgs.yaml`
- in order to retrieve data from an LDAP server, the [pillar_ldap module](https://docs.saltstack.com/en/latest/ref/pillar/all/salt.pillar.pillar_ldap.html) is used, and needs its own configuration pillar. An example can be found in `examples/ldap/pillar_ldap.yaml`

In this particular example, the following LDAP fields are extracted in order to match corresponding Uyuni/SUSE Manager parameters:
- user name
- email
- first_name
- last_name
- roles
070701000000AA000081B400000000000000000000000163F87E3000005839000000000000000000000000000000000000003E00000000susemanager-sls/src/doc/uyuni_config_execution_module_doc.txt === channel_list_manageable_channels
**(login, password)**
List all of manageable channels for the authenticated user

....
login: user login id
password: user password
....

    return: list of manageable channels for the user
    
=== channel_list_my_channels
**(login, password)**
List all of subscribed channels for the authenticated user

....
login: user login id
password: user password
....

    return: list of subscribed channels for the user

=== channel_software_is_globally_subscribable
**(channel_label, org_admin_user=None, org_admin_password=None)**
Returns whether the channel is globally subscribable on the organization

....
channel_label: label of the channel
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean which indicates if channel is globally subscribable
    
=== channel_software_is_user_manageable
**(channel_label, login, org_admin_user=None, org_admin_password=None)**

Returns whether the channel may be managed by the given user.

....
channel_label: label of the channel
login: user login id
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean which indicates if user can manage channel or not
    
=== channel_software_is_user_subscribable
**(channel_label, login, org_admin_user=None, org_admin_password=None)**

Returns whether the channel may be subscribed by the given user.

....
channel_label: label of the channel
login: user login id
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean which indicates if user subscribe the channel or not
    
=== channel_software_set_user_manageable
**(channel_label, login, access, org_admin_user=None, org_admin_password=None)**

Set the manageable flag for a given channel and user.
If access is set to 'true', this method will give the user manage permissions to the channel.
Otherwise, that privilege is revoked.

....
channel_label: label of the channel
login: user login id
access: True if the user should have management access to channel
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success
    
=== channel_software_set_user_subscribable
**(channel_label, login, access, org_admin_user=None, org_admin_password=None)**

Set the subscribable flag for a given channel and user.
If value is set to 'true', this method will give the user subscribe permissions to the channel.
Otherwise, that privilege is revoked.

....
channel_label: label of the channel
login: user login id
access: True if the user should have subscribe access to channel
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success
    
=== master_select_minions
**(target=None, target_type='glob')**

Return list minions from the configured Salt Master on the same host which match the expression on the defined target

....
target: target expression to filter minions
target_type: target type, one of the following: glob, grain, grain_pcre, pillar, pillar_pcre,
                pillar_exact, compound, compound_pillar_exact. Default: glob.
....
        
    return: list of minion IDs
    
=== org_create
**(name, org_admin_user, org_admin_password, first_name, last_name, email, admin_prefix='Mr.', pam=False, admin_user=None, admin_password=None)**

Create an Uyuni organization
Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

....
name: organization name
org_admin_user: organization admin user
org_admin_password: organization admin password
first_name: organization admin first name
last_name: organization admin last name
email: organization admin email
admin_prefix: organization admin prefix
pam:organization admin pam authentication
admin_user: uyuni admin user
admin_password: uyuni admin password
....

    return: dictionary with org information
    
=== org_delete
**(name, admin_user=None, admin_password=None)**

Delete an organization
Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

....
name: organization name
admin_user: uyuni admin user
admin_password: uyuni admin password
....

    return: boolean, True indicates success
    
=== org_get_details
**(name, admin_user=None, admin_password=None)**

Get details of an organization.
Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

....
name: organisation name
admin_user: uyuni admin user
admin_password: uyuni admin password
....

    return: organization details
    
=== org_list_orgs
**(admin_user=None, admin_password=None)**

List all organizations.
Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

....
name: organisation name
admin_user: uyuni admin user
admin_password: uyuni admin password
....

    return: list of all available organizations.
    
=== org_trust_add_trust
**(org_id, org_trust_id, admin_user=None, admin_password=None)**

Add an organization to the list of trusted organizations.
Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

....
org_id: Organization id
org_trust_id: Trust organization id
admin_user: uyuni admin user
admin_password: uyuni admin password
....

    return: boolean, True indicates success
    
=== org_trust_add_trust_by_name
**(org_name, org_trust, admin_user=None, admin_password=None)**

Add an organization to the list of trusted organizations.
Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

....
org_name: organization name
org_trust: Trust organization name
admin_user: uyuni admin user
admin_password: uyuni admin password
....

    return: boolean, True indicates success
    
=== org_trust_list_orgs
**(org_admin_user=None, org_admin_password=None)**

List all organizations trusted by the authenticated user organization

....
org_admin_user: organization admin user
org_admin_password: organization admin password
....

    return: List of organization details
    
=== org_trust_list_trusts
**(org_name, admin_user=None, admin_password=None)**

List all trusts for one organization
Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

....
org_name: Name of the organization to get the trusts
admin_user: authentication user
admin_password: authentication user password
....

    return: list with all organizations and their trust status
    
=== org_trust_remove_trust
**(org_id, org_untrust_id, admin_user=None, admin_password=None)**

Remove an organization from the list of trusted organizations.
Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

....
org_id: orgnization id
org_untrust_id: organizaton id to untrust
admin_user: uyuni admin user
admin_password: uyuni admin password
....

    return: boolean, True indicates success
    
=== org_trust_remove_trust_by_name
**(org_name, org_untrust, admin_user=None, admin_password=None)**

Remove an organization from the list of trusted organizations.
Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

....
org_name: organization name
org_untrust: organization name to untrust
admin_user: uyuni admin user
admin_password: uyuni admin password
....

    return: boolean, True indicates success
    
=== org_update_name
**(org_id, name, admin_user=None, admin_password=None)**

update an Uyuni organization name
Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

....
org_id: organization internal id
name: new organization name
admin_user: uyuni admin user
admin_password: uyuni admin password
....

    return: organization details
    
=== systemgroup_add_remove_systems
**(name, add_remove, system_ids=[], org_admin_user=None, org_admin_password=None)**

Update systems on a system group.

....
name: Name of the system group.
add_remove: True to add to the group, False to remove.
system_ids: list of system ids to add/remove from group
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....

    return: boolean, True indicates success
    
=== systemgroup_create
**(name, descr, org_admin_user=None, org_admin_password=None)**

Create a system group.

....
name: Name of the system group.
descr: Description of the system group.
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....

    return: details of the system group
    
=== systemgroup_delete
**(name, org_admin_user=None, org_admin_password=None)**

Delete a system group.

....
name: Name of the system group.
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....

    return: boolean, True indicates success
    
=== systemgroup_get_details
**(name, org_admin_user=None, org_admin_password=None)**

Return system group details.

....
name: Name of the system group.
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....

    return: details of the system group
    
=== systemgroup_list_systems
**(name, minimal=True, org_admin_user=None, org_admin_password=None)**

List systems in a system group

....
name: Name of the system group.
minimal: default True. Only return minimal information about systems, use False to get more details
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....

    return: List of system information
    
=== systemgroup_update
**(name, descr, org_admin_user=None, org_admin_password=None)**

Update a system group.

....
name: Name of the system group.
descr: Description of the system group.
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....

    return: details of the system group
    
=== systems_get_minion_id_map
**(username=None, password=None, refresh=False)**

Returns a map from minion ID to Uyuni system ID for all systems a user has access to

....
username: username to authenticate
password: password for user
refresh: Get new data from server, ignoring values in local context cache
....

    return: Map between minion ID and system ID of all system accessible by authenticated user
    
=== user_add_assigned_system_groups
**(login, server_group_names, set_default=False, org_admin_user=None, org_admin_password=None)**

Add system groups to user's list of assigned system groups.

....
login: user id to look for
server_group_names: systems groups to add to list of assigned system groups
set_default: Should system groups also be added to user's list of default system groups.
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success
    
=== user_add_role
**(login, role, org_admin_user=None, org_admin_password=None)**
Adds a role to an Uyuni user.

....
login: user id to look for
role: role to be added to the user
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success
    
=== user_create
**(login, password, email, first_name, last_name, use_pam_auth=False, org_admin_user=None, org_admin_password=None)**

Create an Uyuni user.

....
login: user id to look for
password: password for the user
email: user email address
first_name: user first name
last_name: user last name
use_pam_auth: if you wish to use PAM authentication for this user
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success
    
=== user_delete
**(login, org_admin_user=None, org_admin_password=None)**

Deletes an Uyuni user

....
login: user id to look for
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success
    
=== user_get_details
**(login, password=None, org_admin_user=None, org_admin_password=None)**

Get details of an Uyuni user
If password is provided as a parameter, then it will be used to authenticate
If no user credentials are provided, organization administrator credentials will be used
If neither user credentials nor organization admin credentials are provided, credentials from pillar will be used

....
login: user id to look for
password: password for the user
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: The user information
    
=== user_list_assigned_system_groups
**(login, org_admin_user=None, org_admin_password=None)**

Returns the system groups that a user can administer.

....
login: user id to look for
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: List of system groups that a user can administer
    
=== user_list_roles
**(login, password=None, org_admin_user=None, org_admin_password=None)**

Returns an Uyuni user roles.
If password is provided as a parameter, then it will be used to authenticate
If no user credentials are provided, organization administrator credentials will be used
If neither user credentials nor organization admin credentials are provided, credentials from pillar are used

....
login: user id to look for
password: password for the user
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: List of user roles assigned
    
=== user_list_users
**(org_admin_user=None, org_admin_password=None)**

Return all Uyuni users visible to the authenticated user.

....
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: all users visible to the authenticated user
    
=== user_remove_assigned_system_groups
**(login, server_group_names, set_default=False, org_admin_user=None, org_admin_password=None)**

Remove system groups from a user's list of assigned system groups.

....
login: user id to look for
server_group_names: systems groups to remove from list of assigned system groups
set_default: Should system groups also be removed from user's list of default system groups.
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success
    
=== user_remove_role
**(login, role, org_admin_user=None, org_admin_password=None)**

Remove a role from an Uyuni user.

....
login: user id to look for
role: role to be removed from the user
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success
    
=== user_set_details
**(login, password, email, first_name=None, last_name=None, org_admin_user=None, org_admin_password=None)**

Update an Uyuni user.

....
login: user id to look for
password: password for the user
email: user email address
first_name: user first name
last_name: user last name
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_get_details
**(id, org_admin_user=None, org_admin_password=None)**

Get details of an Uyuni Activation Key

....
id: the Activation Key ID
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: Activation Key information

=== activation_key_delete
**(id, org_admin_user=None, org_admin_password=None)**

Deletes an Uyuni Activation Key

....
id: the Activation Key ID
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_create
**(key, description, base_channel_label='', usage_limit=0, system_types=[], universal_default=False, org_admin_user=None, org_admin_password=None)**

Creates an Uyuni Activation Key

....
key: activation key name
description: activation key description
base_channel_label: base channel to be used
usage_limit: activation key usage limit. Default value is 0, which means unlimited usage
system_types: system types to be assigned.
              Can be one of: 'virtualization_host', 'container_build_host',
              'monitoring_entitled', 'osimage_build_host'
universal_default: sets this activation key as organization universal default
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_set_details
**(key, description=None, contact_method=None, base_channel_label=None, usage_limit=None, universal_default=False, org_admin_user=None, org_admin_password=None)**

Updates an Uyuni Activation Key

....
key: activation key name
description: activation key description
base_channel_label: base channel to be used
contact_method: contact method to be used. Can be one of: 'default', 'ssh-push' or 'ssh-push-tunnel'
usage_limit: activation key usage limit. Default value is 0, which means unlimited usage
universal_default: sets this activation key as organization universal default
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_add_entitlements
**(key, system_types, org_admin_user=None, org_admin_password=None)**

Add a list of entitlements to an activation key.

....
key: activation key name
system_types: list of system types to be added
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_remove_entitlements
**(key, system_types, org_admin_user=None, org_admin_password=None)**

Remove a list of entitlements from an activation key.

....
key: activation key name
system_types: list of system types to be removed
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_add_child_channels
**(key, child_channels, org_admin_user=None, org_admin_password=None)**

Add child channels to an activation key.

....
key: activation key name
child_channels: List of child channels to be added
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_remove_child_channels
**(key, child_channels, org_admin_user=None, org_admin_password=None)**

Remove child channels from an activation key.

....
key: activation key name
child_channels: List of child channels to be removed
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_check_config_deployment
**(key, org_admin_user=None, org_admin_password=None)**

Return the status of the 'configure_after_registration' flag for an Activation Key.

....
key: activation key name
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, true if enabled, false if disabled

=== activation_key_enable_config_deployment
**(key, org_admin_user=None, org_admin_password=None)**

Enables the 'configure_after_registration' flag for an Activation Key.

....
key: activation key name
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_disable_config_deployment
**(key, org_admin_user=None, org_admin_password=None)**

Disables the 'configure_after_registration' flag for an Activation Key.

....
key: activation key name
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_add_packages
**(key, packages, org_admin_user=None, org_admin_password=None)**

Add a list of packages to an activation key.

....
key: activation key name
packages: list of packages to be added
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_remove_packages
**(key, packages, org_admin_user=None, org_admin_password=None)**

Remove a list of packages from an activation key.

....
key: activation key name
packages: list of packages to be removed
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_add_server_groups
**(key, server_groups, org_admin_user=None, org_admin_password=None)**

Add a list of server groups to an activation key.

....
key: activation key name
server_groups: list of server groups to be added
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_remove_server_groups
**(key, server_groups, org_admin_user=None, org_admin_password=None)**

Remove a list of server groups from an activation key.

....
key: activation key name
server_groups: list of server groups to be removed
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_list_config_channels
**(key, org_admin_user=None, org_admin_password=None)**

List configuration channels associated to an activation key.

....
key: activation key name
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: List of configuration channels

=== activation_key_set_config_channels
**(keys, config_channel_label, org_admin_user=None, org_admin_password=None)**

Replace the existing set of configuration channels on the given activation keys.
Channels are ranked by their order in the array.

....
keys: list of activation key names
config_channel_label: list of configuration channel labels
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success
   070701000000AB000081B400000000000000000000000163F87E3000001585000000000000000000000000000000000000003A00000000susemanager-sls/src/doc/uyuni_config_state_module_doc.txt === group_absent
**(name, org_admin_user=None, org_admin_password=None)**

Ensure an Uyuni system group is not present

....
name: Group Name
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....

    return: dict for Salt communication
    
=== group_present
**(name, description, target=None, target_type='glob', org_admin_user=None, org_admin_password=None)**

Create or update an Uyuni system group

....
name: group name
description: group description
target: target expression used to filter which minions should be part of the group
target_type: target type, one of the following: glob, grain, grain_pcre, pillar, pillar_pcre,
            pillar_exact, compound, compound_pillar_exact. Default: glob.
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....

    return: dict for Salt communication
    
=== org_absent
**(name, admin_user=None, admin_password=None)**

Ensure an Uyuni organization is not present
Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

....
name: organization name
admin_user: uyuni admin user
admin_password: uyuni admin password
....

    return: dict for Salt communication
    
=== org_present
**(name, org_admin_user, org_admin_password, first_name, last_name, email, pam=False, admin_user=None, admin_password=None)**

Create or update an Uyuni organization
Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

....
name: organization name
org_admin_user: organization admin user
org_admin_password: organization admin password
first_name: organization admin first name
last_name: organization admin last name
email: organization admin email
pam: organization admin pam authentication
admin_user: uyuni admin user
admin_password: uyuni admin password
....

    return: dict for Salt communication
    
=== org_trust
**(name, org_name, trusts, admin_user=None, admin_password=None)**

Establish trust relationships between Uyuni organizations.

....
name: state name
org_name: Organization name
trusts: list of organization names to trust
admin_user: administrator username
admin_password: administrator password
....

    return: dict for Salt communication
    
=== user_absent
**(name, org_admin_user=None, org_admin_password=None)**

Ensure an Uyuni user is not present.

....
name: user login name
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....

    return:  dict for Salt communication
    
=== user_channels
**(name, password, manageable_channels=[], subscribable_channels=[], org_admin_user=None, org_admin_password=None)**

Ensure a user has access to the specified channels

....
name: user login name
password: user password
manageable_channels: channels user can manage
subscribable_channels: channels user can subscribe
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....

    return: dict for Salt communication
    
=== user_present
**(name, password, email, first_name, last_name, use_pam_auth=False, roles=None, system_groups=None, org_admin_user=None, org_admin_password=None)**

Create or update an Uyuni user

....
name: user login name
password: desired password for the user
email: valid email address
first_name: First name
last_name: Last name
use_pam_auth: if you wish to use PAM authentication for this user
roles: roles to assign to user
system_groups: system_groups to assign to user
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....

    return: dict for Salt communication

=== activation_key_present
**(name, description, base_channel='', usage_limit=0, contact_method='default', system_types=[],
   universal_default=False, child_channels=[], configuration_channels=[], packages=[],
   server_groups=[], configure_after_registration=False, org_admin_user=None, org_admin_password=None)**

Ensure an Uyuni Activation Key is present.

....
name: the Activation Key name
description: the Activation description
base_channel: base channel to be used
usage_limit: activation key usage limit. Default value is 0, which means unlimited usage
contact_method: contact method to be used. Can be one of: 'default', 'ssh-push' or 'ssh-push-tunnel'
system_types: system types to be assigned.
              Can be one of: 'virtualization_host', 'container_build_host',
              'monitoring_entitled', 'osimage_build_host'
universal_default: sets this activation key as organization universal default
child_channels: list of child channels to be assigned
configuration_channels: list of configuration channels to be assigned
packages: list of packages which will be installed
server_groups: list of server groups to assign the activation key with
configure_after_registration: deploy configuration files to systems on registration
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....

    return: dict for Salt communication

=== activation_key_absent
**(name, org_admin_user=None, org_admin_password=None)**

Ensure an Uyuni Activation Key is not present.

....
name: the Activation Key name
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....

    return: dict for Salt communication
   070701000000AC000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001D00000000susemanager-sls/src/examples  070701000000AD000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000002200000000susemanager-sls/src/examples/ldap 070701000000AE000081B400000000000000000000000163F87E300000020B000000000000000000000000000000000000003300000000susemanager-sls/src/examples/ldap/pillar_ldap.yaml    ldap-roles:
  server:    ldap.example.com
  port:      389
  anonymous: true
  mode:      map
  dn:        ou=permissions,dc=example,dc=com
  filter:    '(objectclass=groupOfNames)'
  attrs:
    - cn
    - dn

ldap-users:
  server:    ldap.example.com
  port:      389
  anonymous: true
  mode:      map
  dn:        ou=people,dc=example,dc=com
  filter:    '(objectclass=person)'
  attrs:
    - givenName
    - sn
    - mail
    - uid
    - ou
    - dn
  lists:
    - memberOf

search_order:
  - ldap-roles
  - ldap-users
 070701000000AF000081B400000000000000000000000163F87E30000000BC000000000000000000000000000000000000003300000000susemanager-sls/src/examples/ldap/pillar_orgs.yaml    uyuni:
  orgs:
    - org_id: MY-ORG
      org_admin_user: org_admin_user
      org_admin_password: org_admin_pass
      first_name: admin
      last_name: admin
      email: admin@org.com
070701000000B0000081B400000000000000000000000163F87E3000000620000000000000000000000000000000000000003700000000susemanager-sls/src/examples/ldap/uyuni_users_ldap.sls    
## Create organizations based on static pillar data
{% set org_auth = {} %}

{% for org in pillar.get('uyuni', {}).get('orgs', []) %}
{{org['org_id']}}:
  uyuni.org_present:
    - name: {{org['org_id']}}
    - org_admin_user: {{org['org_admin_user']}}
    - org_admin_password: {{org['org_admin_password']}}
    - first_name: {{org['first_name']}}
    - last_name: {{org['last_name']}}
    - email: {{org['email']}}
{% set _ = org_auth.update({org.org_id: {'org_admin_user': org.org_admin_user,  'org_admin_password': org.org_admin_password }}) %}
{% endfor %}

## load available roles to local map variable
## those where extracted form ldap to pillar
{% set roles_map = {} %}
{% for role in pillar.get('ldap-roles', []) %}
{% set _ = roles_map.update({role.dn: role.cn}) %}
{% endfor %}

{% for user in pillar.get('ldap-users', []) %}

  {% set admin_user = None %}
  {% set admin_password = None %}
  {% if org_auth[user['ou']] %}
    {% set admin_user = org_auth[user['ou']].org_admin_user %}
    {% set admin_password = org_auth[user['ou']].org_admin_password %}
  {% endif %}

{{user['uid']}}:
  uyuni.user_present:
    - name: {{user['uid']}}
    - password: 'dummy_local_pass'
    - email: {{user['mail']}}
    - first_name: {{user['givenName']}}
    - last_name: {{user['sn']}}
    - use_pam_auth: true
    - org_admin_user: {{admin_user}}
    - org_admin_password: {{admin_password}}
    {% if user['memberOf'] %}
    - roles:
      {% for user_role in user['memberOf'] %}
      - {{ roles_map[user_role] }}
      {% endfor %}
    {% endif %}

{% endfor %}
070701000000B1000081B400000000000000000000000163F87E30000007BA000000000000000000000000000000000000003700000000susemanager-sls/src/examples/uyuni_config_hardcode.sls    ## manage orgs
my_org:
  uyuni.org_present:
    - name: my_org
    - org_admin_user: my_org_user
    - org_admin_password: my_org_user
    - first_name: first_name
    - last_name: last_name__
    - email: my_org_user@org.com
    - admin_user: admin
    - admin_password: admin

org_trust_present:
  uyuni.org_trust:
    - org_name: SUSE
    - trusts:
      - my_org

# manager system groups
system_group_httpd:
  uyuni.group_present:
    - name: httpd_servers
    - description: httpd_servers
    - target: "*httpd*"
    - org_admin_user: my_org_user
    - org_admin_password: my_org_user

#manager users
user_1:
  uyuni.user_present:
    - name: user1
    - password: user1
    - email: user1@teest.como
    - first_name: first
    - last_name: last
    - org_admin_user: my_org_user
    - org_admin_password: my_org_user
    - roles: ["system_group_admin", "channel_admin"]
    - system_groups:
      - httpd_servers

user_1_channels:
  ## rename it to user_channels (without _present)
  uyuni.user_channels:
    - name: user1
    - password: user1
    - org_admin_user: my_org_user
    - org_admin_password: my_org_user
    - manageable_channels:
      - my_local_channel
    - subscribable_channels:
      - new_local

define_custom_activation_key:
    uyuni.activation_key_present:
        - name: my-suse
        - description: "My Activation Key created via Salt"
        - org_admin_user: my_org_user
        - org_admin_password: my_org_user
        - base_channel: sle-product-sles15-sp2-pool-x86_64
        - child_channels:
            - sle-module-server-applications15-sp2-pool-x86_64
            - sle-module-server-applications15-sp2-updates-x86_64
        - configuration_channels:
            - firewall
        - packages:
            - name: emacs
              arch: x86_64
        - server_groups:
            - httpd_servers
        - usage_limit: 10
        - system_types:
            - virtualization_host
        - configure_after_registration: true
  070701000000B2000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001B00000000susemanager-sls/src/grains    070701000000B3000081B400000000000000000000000163F87E3000000000000000000000000000000000000000000000002700000000susemanager-sls/src/grains/__init__.py    070701000000B4000081B400000000000000000000000163F87E3000001472000000000000000000000000000000000000002600000000susemanager-sls/src/grains/cpuinfo.py import json
import logging
import salt.modules.cmdmod
import salt.utils
import os
import re

try:
    from salt.utils.path import which_bin as _which_bin
except ImportError:
    from salt.utils import which_bin as _which_bin

from salt.exceptions import CommandExecutionError

__salt__ = {
    'cmd.run_all': salt.modules.cmdmod.run_all,
}

log = logging.getLogger(__name__)


def _lscpu_count_sockets(feedback):
    '''
    Count CPU sockets by parsing the parseable output of "lscpu -p".

    Each non-comment line of "lscpu -p" is CSV with the socket index in
    the third column; the socket count is the highest index plus one.

    :param feedback: list collecting human-readable failure descriptions
    :return: {'cpusockets': <count>} on success, None otherwise
    '''
    lscpu = _which_bin(['lscpu'])
    if lscpu is not None:
        try:
            log.debug("Trying lscpu to get CPU socket count")
            ret = __salt__['cmd.run_all']('{0} -p'.format(lscpu), output_loglevel='quiet')
            if ret['retcode'] == 0:
                max_socket_index = -1
                for line in ret['stdout'].strip().splitlines():
                    if line.startswith('#'):
                        # comment/header lines of the parseable output
                        continue
                    socket_index = int(line.split(',')[2])
                    if socket_index > max_socket_index:
                        max_socket_index = socket_index
                if max_socket_index > -1:
                    return {'cpusockets': (1 + max_socket_index)}
        except Exception as error:
            feedback.append("lscpu: {0}".format(str(error)))
            log.debug(str(error))
    else:
        # keep feedback consistent with _dmidecode_count_sockets so the
        # caller's warning explains why this method was skipped
        feedback.append("lscpu: executable not found")


def _cpuinfo_count_sockets(feedback):
    '''
    Count CPU sockets by parsing /proc/cpuinfo.

    Collects the distinct "physical id" values found in the file; the
    number of distinct IDs is the socket count.

    :param feedback: list collecting human-readable failure descriptions
    :return: {'cpusockets': <count>} on success, None otherwise
    '''
    unique_physical_ids = set()
    if os.access("/proc/cpuinfo", os.R_OK):
        try:
            log.debug("Trying /proc/cpuinfo to get CPU socket count")
            with open('/proc/cpuinfo') as cpuinfo:
                for entry in cpuinfo.readlines():
                    if not entry.strip().startswith('physical id'):
                        continue
                    fields = entry.split(':')
                    if len(fields) >= 2 and len(fields[1]) >= 2:
                        unique_physical_ids.add(fields[1].strip())
            if unique_physical_ids:
                return {'cpusockets': len(unique_physical_ids)}
        except Exception as error:
            log.debug(str(error))
            feedback.append("/proc/cpuinfo: {0}".format(str(error)))
        else:
            # reached only when parsing succeeded but yielded nothing:
            # the file exists yet contains no usable "physical id" lines
            feedback.append('/proc/cpuinfo: format is not applicable')


def _dmidecode_count_sockets(feedback):
    '''
    Count CPU sockets with the dmidecode utility.

    Each "Processor Information" section in the output of
    "dmidecode -t processor" corresponds to one socket.

    :param feedback: list collecting human-readable failure descriptions
    :return: {'cpusockets': <count>} on success, None otherwise
    '''
    dmidecode = _which_bin(['dmidecode'])
    if dmidecode is None:
        feedback.append("dmidecode: executable not found")
        return
    try:
        log.debug("Trying dmidecode to get CPU socket count")
        ret = __salt__['cmd.run_all']("{0} -t processor".format(dmidecode), output_loglevel='quiet')
        if ret['retcode'] == 0:
            sockets = sum(
                1 for line in ret['stdout'].strip().splitlines()
                if 'Processor Information' in line
            )
            if sockets:
                return {'cpusockets': sockets}
    except Exception as error:
        log.debug(str(error))
        feedback.append("dmidecode: {0}".format(str(error)))


def cpusockets():
    """
    Return the number of CPU sockets as a grain.

    Tries lscpu, /proc/cpuinfo and dmidecode in that order; the first
    method that yields a result wins. Each method appends a failure
    description to `feedback`, used for the warning when all fail.

    :return: {'cpusockets': <count>} or None when every method failed
    """
    feedback = list()
    grains = _lscpu_count_sockets(feedback) or _cpuinfo_count_sockets(feedback) or _dmidecode_count_sockets(feedback)
    if not grains:
        # logging.Logger.warn is a deprecated alias of warning
        log.warning("Could not determine CPU socket count: {0}".format(' '.join(feedback)))

    return grains


def total_num_cpus():
    """
    Return the total number of CPUs present in the system.

    /proc/cpuinfo shows only the number of *active* CPUs; on s390x this
    can differ from the number of present CPUs (see IBM redbook "Using
    z/VM for Test and Development Environments: A Roundup", chapter 3.5),
    so the cpuN entries under /sys/devices/system/cpu/ are counted
    instead.
    """
    cpu_entry = re.compile(r"^cpu[0-9]+$")
    cpu_sysfs_dir = '/sys/devices/system/cpu/'
    entries = os.listdir(cpu_sysfs_dir) if os.path.exists(cpu_sysfs_dir) else []
    return {'total_num_cpus': sum(1 for name in entries if cpu_entry.match(name))}


def cpu_data():
    """
    Return extra CPU data (model, vendor ID, stepping, cores per socket,
    NUMA node count) parsed from plain "lscpu" output.

    :return: dict mapping grain names (cpu_model, cpu_vendor, ...) to
             string values, or None when lscpu is unavailable or fails
    """
    lscpu = _which_bin(['lscpu'])
    if lscpu is not None:
        try:
            log.debug("Trying lscpu to get CPU data")
            # LC_ALL=C keeps the field labels parseable regardless of locale
            ret = __salt__['cmd.run_all']('{0}'.format(lscpu), env={'LC_ALL': 'C'}, output_loglevel='quiet')
            if ret['retcode'] == 0:
                lines = ret["stdout"].splitlines()
                name_map = {
                    "Model name": "cpu_model",
                    "Vendor ID": "cpu_vendor",
                    "NUMA node(s)": "cpu_numanodes",
                    "Stepping": "cpu_stepping",
                    "Core(s) per socket": "cpu_cores",
                }
                values = {}
                for line in lines:
                    parts = [l.strip() for l in line.split(":", 1)]
                    if len(parts) == 2 and parts[0] in name_map:
                        values[name_map[parts[0]]] = parts[1]
                log.debug(values)
                return values
            else:
                # the previous message referred to a "-J" option that is no
                # longer passed; report the actual failure instead. Also,
                # logging.Logger.warn is a deprecated alias of warning.
                log.warning("lscpu exited with code {0}".format(ret['retcode']))
        except (CommandExecutionError, ValueError) as error:
            log.warning("lscpu: {0}".format(str(error)))
  070701000000B5000081B400000000000000000000000163F87E3000001285000000000000000000000000000000000000002B00000000susemanager-sls/src/grains/public_cloud.py    # -*- coding: utf-8 -*-
'''
Copyright (c) 2019 SUSE LLC

This software is licensed to you under the GNU General Public License,
version 2 (GPLv2). There is NO WARRANTY for this software, express or
implied, including the implied warranties of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
along with this software; if not, see
http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.

This grain module is only loaded in case of a public cloud instance.

Supported Instances: AWS EC2, Azure and Google Compute Engine instances

Returns a grain called "instance_id" containing the virtual instance ID
according to the Public Cloud provider. The data is gathered using the
internal API available from within the instance.

Author: Pablo Suárez Hernández <psuarezhernandez@suse.com>
Based on: https://docs.saltstack.com/en/latest/ref/grains/all/salt.grains.metadata.html
'''
from __future__ import absolute_import, print_function, unicode_literals

# Import python libs
import os
import socket
from multiprocessing.pool import ThreadPool
import logging

# Import salt libs
import salt.utils.http as http

# Internal metadata API information
INTERNAL_API_IP = '169.254.169.254'
HOST = 'http://{0}/'.format(INTERNAL_API_IP)

INSTANCE_ID = None

AMAZON_URL_PATH = 'latest/meta-data/'
AZURE_URL_PATH = 'metadata/instance/compute/'
AZURE_API_ARGS = '?api-version=2017-08-01&format=text'
GOOGLE_URL_PATH = 'computeMetadata/v1/instance/'

log = logging.getLogger(__name__)


def __virtual__():
    '''
    Only load this grain module when the minion runs inside a supported
    public cloud (AWS EC2, Azure or Google Compute Engine).

    Detection probes the internal metadata API at 169.254.169.254: first
    a cheap TCP connect with a short timeout, then the three
    provider-specific endpoints in parallel. On success the instance ID
    is fetched and cached in the module-global INSTANCE_ID.

    :return: True when a valid instance ID was obtained, False otherwise
    '''
    global INSTANCE_ID
    log.debug("Checking if minion is running in the public cloud")
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(0.1)
    result = sock.connect_ex((INTERNAL_API_IP, 80))
    if result != 0:
        # metadata endpoint not reachable -> not a public cloud instance
        return False

    def _do_api_request(data):
        # data is a (provider, url, headers) tuple; returns {provider: response}
        opts = {
            'http_connect_timeout': 0.1,
            'http_request_timeout': 0.1,
        }
        try:
            ret = {
                data[0]: http.query(data[1],
                                    status=True,
                                    header_dict=data[2],
                                    raise_error=False,
                                    opts=opts)
            }
        except Exception:
            # best-effort probe: an unreachable endpoint just means
            # "not this provider", so return an empty response
            ret = {data[0]: dict()}
        return ret

    api_check_dict = [
        ('amazon', os.path.join(HOST, AMAZON_URL_PATH), None),
        ('google', os.path.join(HOST, GOOGLE_URL_PATH), {"Metadata-Flavor": "Google"}),
        ('azure', os.path.join(HOST, AZURE_URL_PATH) + AZURE_API_ARGS, {"Metadata": "true"}),
    ]

    api_ret = {}
    results = []

    try:
        pool = ThreadPool(3)
        results = pool.map(_do_api_request, api_check_dict)
        pool.close()
        pool.join()
    except Exception as exc:
        import traceback
        log.error(traceback.format_exc())
        log.error("Exception while creating a ThreadPool for accessing metadata API: %s", exc)

    for i in results:
        api_ret.update(i)

    # use .get() with an empty default: when the ThreadPool failed above,
    # api_ret is empty and plain indexing would raise KeyError here
    if _is_valid_endpoint(api_ret.get('amazon', {}), 'instance-id'):
        INSTANCE_ID = http.query(os.path.join(HOST, AMAZON_URL_PATH, 'instance-id'), raise_error=False)['body']
        return True
    elif _is_valid_endpoint(api_ret.get('azure', {}), 'vmId'):
        INSTANCE_ID = http.query(os.path.join(HOST, AZURE_URL_PATH, 'vmId') + AZURE_API_ARGS, header_dict={"Metadata": "true"}, raise_error=False)['body']
        return True
    elif _is_valid_endpoint(api_ret.get('google', {}), 'id'):
        INSTANCE_ID = http.query(os.path.join(HOST, GOOGLE_URL_PATH, 'id'), header_dict={"Metadata-Flavor": "Google"}, raise_error=False)['body']
        return True

    return False


def _is_valid_endpoint(response, tag):
    if not response.get('status', 0) == 200:
        return False
    elif not tag in response.get('body', ''):
        return False
    elif ' ' in response.get('body', ''):
        return False
    else:
        return True


def _is_valid_instance_id(id_str):
    if not id_str:
        return False
    if os.linesep in id_str:
        return False
    elif ' ' in id_str:
        return False
    elif len(id_str) > 128:
        return False
    else:
        return True


def instance_id():
    '''
    Return the "instance_id" grain from the module-global INSTANCE_ID
    (populated by __virtual__), or an empty dict when the cached value
    does not pass the sanity checks.
    '''
    global INSTANCE_ID
    if not _is_valid_instance_id(INSTANCE_ID):
        log.error("The obtained public cloud instance id doesn't seems correct: {}".format(INSTANCE_ID))
        log.error("Skipping")
        return {}
    log.debug("This minion is running in the public cloud. Adding instance_id to grains: {}".format(INSTANCE_ID))
    return {'instance_id': INSTANCE_ID}

def is_payg_instance():
    '''
    Return the "is_payg_instance" grain: set to True when the
    registercloudguest tool (shipped on PAYG public cloud images) is
    present, otherwise return an empty dict so no grain is added.
    '''
    if os.path.isfile('/usr/sbin/registercloudguest'):
        return {'is_payg_instance': True}
    return {}
   070701000000B6000081B400000000000000000000000163F87E30000006DF000000000000000000000000000000000000002300000000susemanager-sls/src/grains/virt.py    """
Grains for virtualization hosts
"""

import logging
import re
import subprocess
from xml.etree import ElementTree
import salt.modules.virt

log = logging.getLogger(__name__)


def __virtual__():
    # Load these grains only where the stock virt execution module
    # itself loads (i.e. libvirt support is present on the minion).
    return salt.modules.virt.__virtual__()


def features():
    """returns the features map of the virt module

    Reported flags:
      - enhanced_network: the virt module provides network_update()
      - cluster: the host is part of a pacemaker cluster ("crm status" succeeds)
      - resource_agent_start_resources: the VirtualDomain resource agent
        exposes a 'start_resources' parameter
      - uefi_auto_loader: installed libvirt is >= 5.3
    """
    try:
        # Only the return code matters; silence crm's output so it does
        # not leak into the minion's stdout/stderr.
        in_cluster = (
            subprocess.check_call(
                ["crm", "status"],
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            )
            == 0
        )
    except Exception:
        in_cluster = False

    try:
        ra_conf = ElementTree.fromstring(
            subprocess.Popen(
                ["crm_resource", "--show-metadata", "ocf:heartbeat:VirtualDomain"],
                stdout=subprocess.PIPE,
            ).communicate()[0]
        )
        start_resources_ra = (
            ra_conf.find(".//parameter[@name='start_resources']") is not None
        )
    except Exception:
        start_resources_ra = False

    # Encode MAJOR.MINOR.RELEASE as MAJOR*10^6 + MINOR*10^3 + RELEASE;
    # -1 means "libvirtd not found".
    libvirt_version = -1
    try:
        version_out = subprocess.Popen(["libvirtd", "-V"], stdout=subprocess.PIPE).communicate()[0]
        # raw bytes pattern: b'\d' without r-prefix raises a
        # DeprecationWarning (invalid escape sequence) on newer Pythons
        matcher = re.search(rb"(\d+)\.(\d+)\.(\d+)", version_out)
        if matcher:
            libvirt_version = 0
            for idx in range(len(matcher.groups())):
                libvirt_version += int(matcher.group(idx + 1)) * 1000 ** (len(matcher.groups()) - idx - 1)
    except OSError:
        log.error("libvirtd is not installed or is not in the PATH")

    return {
        "virt_features": {
            "enhanced_network": "network_update" in salt.modules.virt.__dict__,
            "cluster": in_cluster,
            "resource_agent_start_resources": start_resources_ra,
            # Libvirt has the firmware='efi' support since 5.2, but vital fixes came in 5.3 only
            "uefi_auto_loader": libvirt_version >= 5003000,
        },
    }
 070701000000B7000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001C00000000susemanager-sls/src/modules   070701000000B8000081B400000000000000000000000163F87E3000000000000000000000000000000000000000000000002800000000susemanager-sls/src/modules/__init__.py   070701000000B9000081B400000000000000000000000163F87E3000003199000000000000000000000000000000000000002900000000susemanager-sls/src/modules/kiwi_info.py  import salt.exceptions
import logging
import os
import re
import hashlib
import json

log = logging.getLogger(__name__)

# Regular expressions used to pick apart Kiwi image basenames of the
# form "<name>.<arch>-<version>" (see image_details / inspect_boot_image).
# Kiwi version is always in format "MAJOR.MINOR.RELEASE" with numeric values
# Source https://osinside.github.io/kiwi/image_description/elements.html#preferences-version
KIWI_VERSION_REGEX=r'\d+\.\d+\.\d+'
# Taken from Kiwi sources https://github.com/OSInside/kiwi/blob/eb2b1a84bf7/kiwi/schema/kiwi.rng#L81
KIWI_ARCH_REGEX=r'(x86_64|i586|i686|ix86|aarch64|arm64|armv5el|armv5tel|armv6hl|armv6l|armv7hl|armv7l|ppc|ppc64|ppc64le|s390|s390x|riscv64)'
# Taken from Kiwi sources https://github.com/OSInside/kiwi/blob/eb2b1a84bf7/kiwi/schema/kiwi.rng#L26
KIWI_NAME_REGEX=r'[a-zA-Z0-9_\-\.]+'

def parse_profile(chroot):
    """Parse the Kiwi ``image/.profile`` file inside ``chroot``.

    Lines of the form NAME='VALUE' become dict entries; anything else is
    ignored. Returns an empty dict when the file does not exist.
    """
    result = {}
    profile_path = os.path.join(chroot, 'image', '.profile')
    if __salt__['file.file_exists'](profile_path):
        content = __salt__['cp.get_file_str'](profile_path)
        line_re = re.compile(r"^(?P<name>.*?)='(?P<val>.*)'")
        for raw_line in content.splitlines():
            parsed = line_re.match(raw_line)
            if parsed:
                result[parsed.group('name')] = parsed.group('val')
    return result

def parse_buildinfo(dest):
    """Parse the INI-like ``kiwi.buildinfo`` file in ``dest``.

    ``[section]`` headers open nested dicts; ``key=value`` lines are
    stored in the current section (or at top level before any header).
    Returns an empty dict when the file does not exist.
    """
    result = {}
    info_path = os.path.join(dest, 'kiwi.buildinfo')
    if not __salt__['file.file_exists'](info_path):
        return result

    content = __salt__['cp.get_file_str'](info_path)
    section_re = re.compile(r"^\[(?P<name>.*)\]")
    pair_re = re.compile(r"^(?P<name>.*?)=(?P<val>.*)")

    current = result
    for raw_line in content.splitlines():
        header = section_re.match(raw_line)
        if header:
            current = {}
            result[header.group('name')] = current

        pair = pair_re.match(raw_line)
        if pair:
            current[pair.group('name')] = pair.group('val')
    return result

# fallback for SLES11 Kiwi and for Kiwi NG that does not create the buildinfo file
def guess_buildinfo(dest):
    """Derive minimal buildinfo by scanning the files in ``dest``.

    The image basename is taken from the ``*.packages`` file; when both
    a kernel and an initrd are present the image type is set to 'pxe'.
    """
    ret = {'main': {}}

    basename_re = re.compile(r"^(?P<basename>.*)\.packages$")
    # legacy Kiwi and Kiwi NG use different initrd/kernel naming schemes
    initrd_patterns = (re.compile(r"^initrd-netboot.*"), re.compile(r".*\.initrd\..*"))
    kernel_patterns = (re.compile(r".*\.kernel\..*"), re.compile(r".*\.kernel$"))

    have_kernel = False
    have_initrd = False
    for entry in __salt__['file.readdir'](dest):
        found = basename_re.match(entry)
        if found:
            ret['main']['image.basename'] = found.group('basename')

        if any(p.match(entry) for p in initrd_patterns):
            have_initrd = True

        if any(p.match(entry) for p in kernel_patterns):
            have_kernel = True

    if have_kernel and have_initrd:
        ret['main']['image.type'] = 'pxe'
    return ret

# Kiwi NG
# Helper script executed by parse_kiwi_result() with the *system*
# python3: unpickles a kiwi.result file and prints selected fields of it
# as JSON on stdout. Kept as a string so it can be passed to
# cmd.exec_code_all without shipping an extra file.
_kiwi_result_script = """
import sys
import pickle
import json
ret = {}
with open(sys.argv[1], 'rb') as f:
    result = pickle.load(f)
    ret['arch'] = result.xml_state.host_architecture
    ret['basename'] = result.xml_state.xml_data.name
    ret['type'] = result.xml_state.build_type.image
    ret['filesystem'] = result.xml_state.build_type.filesystem
    ret['initrd_system'] = result.xml_state.build_type.initrd_system
    print(json.dumps(ret))
"""

def parse_kiwi_result(dest):
    """Extract image metadata from the pickled ``kiwi.result`` in ``dest``.

    Unpickling depends on python-kiwi modules that are not under our
    control and may be missing from the salt bundle, so the file is
    parsed by a small wrapper script run with the system python3.

    Returns an empty dict on any failure; callers must treat every
    value as optional.
    """
    result_path = os.path.join(dest, 'kiwi.result')
    if not __salt__['file.file_exists'](result_path):
        return {}

    outcome = __salt__['cmd.exec_code_all']('/usr/bin/python3', _kiwi_result_script, args=[result_path])
    if outcome['retcode'] != 0:
        return {}
    return json.loads(outcome['stdout'])

def parse_packages(path):
    """Parse a Kiwi ``.packages`` listing into a list of package dicts.

    Each line is pipe-separated: name|epoch|version|release|arch|disturl
    with an optional trailing |license. '(none)' fields are normalized
    to ''; gpg-pubkey pseudo-packages (empty arch) are skipped.
    Returns [] when the file does not exist.
    """
    packages = []
    if not __salt__['file.file_exists'](path):
        return packages

    line_re = re.compile(r"^(?P<name>.*?)\|(?P<epoch>.*?)\|(?P<version>.*?)\|(?P<release>.*?)\|(?P<arch>.*?)\|(?P<disturl>.*?)(\|(?P<license>.*))?$")
    for raw_line in __salt__['cp.get_file_str'](path).splitlines():
        parsed = line_re.match(raw_line)
        if not parsed:
            continue
        # translate '(none)' values to '' (missing license group stays None)
        entry = {key: ('' if val == '(none)' else val)
                 for key, val in parsed.groupdict().items()}

        # gpg-pubkey entries with no arch are not real packages
        if entry['arch'] == '' and entry['name'].startswith('gpg-pubkey'):
            continue

        packages.append(entry)
    return packages

def get_md5(path):
    """Return ``{'hash': <md5>, 'size': <bytes>}`` for ``path``.

    Returns an empty dict when the file does not exist.
    """
    if not __salt__['file.file_exists'](path):
        return {}
    return {
        'hash': __salt__['file.get_hash'](path, form='md5'),
        'size': __salt__['file.stats'](path).get('size'),
    }

def parse_kiwi_md5(path, compressed = False):
    """Parse a Kiwi ``.md5`` checksum file.

    The file holds "<md5> <blocks> <blocksize>", plus two extra
    blocks/blocksize columns for compressed images. Returns a dict with
    'hash', 'size' and (when compressed) 'compressed_size'; an empty
    dict when the file is missing or malformed.
    """
    res = {}

    if not __salt__['file.file_exists'](path):
        return res

    md5_str = __salt__['cp.get_file_str'](path)
    if md5_str is not None:
        base = r"^(?P<md5>[0-9a-f]+)\s+(?P<size1>[0-9]+)\s+(?P<size2>[0-9]+)"
        if compressed:
            pattern = re.compile(base + r"\s+(?P<csize1>[0-9]+)\s+(?P<csize2>[0-9]+)\s*$")
        else:
            pattern = re.compile(base + r"\s*$")
        parsed = pattern.match(md5_str)
        if parsed:
            res['hash'] = parsed.group('md5')
            # sizes are stored as <block count> * <block size>
            res['size'] = int(parsed.group('size1')) * int(parsed.group('size2'))
            if compressed:
                res['compressed_size'] = int(parsed.group('csize1')) * int(parsed.group('csize2'))
    return res

# Known image file suffixes, ordered from most to least specific; the
# first suffix whose file exists wins (see image_details and
# inspect_boot_image). 'compression' is None for formats that are not
# additionally compressed; the final empty suffix matches the bare
# basename as a last resort.
_compression_types = [
    { 'suffix': '.gz', 'compression': 'gzip' },
    { 'suffix': '.bz', 'compression': 'bzip' },
    { 'suffix': '.xz', 'compression': 'xz' },
    { 'suffix': '.install.iso',    'compression': None },
    { 'suffix': '.iso',            'compression': None },
    { 'suffix': '.raw',            'compression': None },
    { 'suffix': '',    'compression': None }
    ]

def image_details(dest, bundle_dest = None):
    """Collect metadata about the Kiwi image built in ``dest``.

    Returns ``{'image': {...}}`` — plus ``'bundles'`` when
    ``bundle_dest`` is given — or None when the image basename does not
    match the expected "<name>.<arch>-<version>" pattern.
    """
    res = {}
    buildinfo = parse_buildinfo(dest) or guess_buildinfo(dest)
    kiwiresult = parse_kiwi_result(dest)

    basename = buildinfo.get('main', {}).get('image.basename', '')
    image_type = kiwiresult.get('type') or buildinfo.get('main', {}).get('image.type', 'unknown')
    fstype = kiwiresult.get('filesystem')

    name_re = re.compile(r"^(?P<name>{})\.(?P<arch>{})-(?P<version>{})$".format(
        KIWI_NAME_REGEX, KIWI_ARCH_REGEX, KIWI_VERSION_REGEX))
    parsed = name_re.match(basename)
    if not parsed:
        return None

    # locate the actual image file, most specific suffix first
    filename = None
    filepath = None
    compression = None
    for ctype in _compression_types:
        candidate = os.path.join(dest, basename + ctype['suffix'])
        if __salt__['file.file_exists'](candidate):
            compression = ctype['compression']
            filename = basename + ctype['suffix']
            filepath = candidate
            break

    res['image'] = {
        'basename': basename,
        'name': parsed.group('name'),
        'arch': parsed.group('arch'),
        'type': image_type,
        'version': parsed.group('version'),
        'filename': filename,
        'filepath': filepath,
        'fstype': fstype
    }
    if compression:
        res['image'].update({
            'compression': compression,
            'compressed_hash': __salt__['hashutil.digest_file'](filepath, checksum='md5')
        })

    # the .md5 file provides uncompressed (and compressed) sizes
    res['image'].update(parse_kiwi_md5(os.path.join(dest, basename + '.md5'), compression is not None))

    if bundle_dest is not None:
        res['bundles'] = inspect_bundles(bundle_dest, basename)

    return res

def inspect_image(dest, bundle_dest = None):
    """Full inspection of a built Kiwi image.

    Combines image_details() with the detected filesystem type, the
    package list and — for PXE images — the boot image files. Returns
    None when the image cannot be identified.
    """
    res = image_details(dest, bundle_dest)
    if not res:
        return None

    basename = res['image']['basename']

    # a <basename>.<fstype> file or symlink pins down the filesystem type
    for fstype in ('ext2', 'ext3', 'ext4', 'btrfs', 'xfs'):
        fs_path = os.path.join(dest, basename + '.' + fstype)
        if __salt__['file.file_exists'](fs_path) or __salt__['file.is_link'](fs_path):
            res['image']['fstype'] = fstype
            break

    res['packages'] = parse_packages(os.path.join(dest, basename + '.packages'))

    if res['image']['type'] == 'pxe':
        res['boot_image'] = inspect_boot_image(dest)

    return res


def inspect_boot_image(dest):
    """Find and describe the PXE boot kernel/initrd pair in `dest`.

    Detects both legacy Kiwi ("<basename>.kernel.<kver>.md5") and
    Kiwi NG ("<basename>-<kver>.kernel") naming schemes. Returns a dict
    with name/arch/basename, initrd and kernel sub-dicts (version,
    filename, hash/size when resolvable) and a 'kiwi_ng' flag, or None
    when no kernel file is found.
    """
    res = None
    files = __salt__['file.readdir'](dest)

    # legacy Kiwi: <name>.<arch>-<version>.kernel.<kernelversion>.md5
    pattern = re.compile(r"^(?P<name>{})\.(?P<arch>{})-(?P<version>{})\.kernel\.(?P<kernelversion>.*)\.md5$".format(KIWI_NAME_REGEX, KIWI_ARCH_REGEX, KIWI_VERSION_REGEX))
    # Kiwi NG: <name>.<arch>-<version>-<kernelversion>.kernel
    pattern_kiwi_ng = re.compile(r"^(?P<name>{})\.(?P<arch>{})-(?P<version>{})-(?P<kernelversion>.*)\.kernel$".format(KIWI_NAME_REGEX, KIWI_ARCH_REGEX, KIWI_VERSION_REGEX))
    # NOTE: `basename` assigned in this loop is deliberately reused after
    # the loop (via the break); the first matching file wins.
    for f in files:
        match = pattern.match(f)
        if match:
            basename = match.group('name') + '.' + match.group('arch') + '-' + match.group('version')
            res = {
                'name': match.group('name'),
                'arch': match.group('arch'),
                'basename': basename,
                'initrd': {
                    'version': match.group('version')
                    },
                'kernel': {
                    'version': match.group('kernelversion')
                    },
                'kiwi_ng': False
            }
            break
        match = pattern_kiwi_ng.match(f)
        if match:
            basename = match.group('name') + '.' + match.group('arch') + '-' + match.group('version')
            res = {
                'name': match.group('name'),
                'arch': match.group('arch'),
                'basename': basename,
                'initrd': {
                    'version': match.group('version')
                    },
                'kernel': {
                    'version': match.group('kernelversion')
                },
                'kiwi_ng': True
            }
            break

    if res is None:
        return None

    # resolve the initrd file: Kiwi NG inserts '.initrd' before the
    # compression suffix, legacy Kiwi does not
    for c in _compression_types:
        if res['kiwi_ng']:
            path = basename + '.initrd' + c['suffix']
        else:
            path = basename + c['suffix']
        if __salt__['file.file_exists'](os.path.join(dest, path)):
            res['initrd']['filename'] = path

            # Kiwi NG has no .md5 for the initrd — hash the file itself
            if res['kiwi_ng']:
                res['initrd'].update(get_md5(os.path.join(dest, path)))
            else:
                res['initrd'].update(parse_kiwi_md5(os.path.join(dest, basename + '.md5')))
            break

    # resolve the kernel file, again per naming scheme
    if res['kiwi_ng']:
        path = os.path.join(dest, basename + '-' + res['kernel']['version'] + '.kernel')
        if __salt__['file.file_exists'](path):
            res['kernel']['filename'] = basename + '-' + res['kernel']['version'] + '.kernel'
            res['kernel'].update(get_md5(path))
    else:
        path = os.path.join(dest, basename + '.kernel.' + res['kernel']['version'])
        if __salt__['file.file_exists'](path):
            res['kernel']['filename'] = basename + '.kernel.' + res['kernel']['version']
            res['kernel'].update(parse_kiwi_md5(path + '.md5'))

    return res

def inspect_bundles(dest, basename):
    """List bundle archives for image `basename` found in `dest`.

    Scans for "<basename>-<id>.<suffix>.sha256" files; each entry gets
    basename/id/suffix plus hash, filename and filepath resolved from
    the .sha256 file contents. Returns a (possibly empty) list.
    """
    res = []
    files = __salt__['file.readdir'](dest)

    pattern = re.compile(r"^(?P<basename>" + re.escape(basename) + r")-(?P<id>[^.]*)\.(?P<suffix>.*)\.sha256$")
    for f in files:
        match = pattern.match(f)
        if match:
            res1 = match.groupdict()
            sha256_file = f

            # preferred format: "<hash>  <filename>"
            sha256_str = __salt__['cp.get_file_str'](os.path.join(dest, sha256_file))
            pattern2 = re.compile(r"^(?P<hash>[0-9a-f]+)\s+(?P<filename>.*)\s*$")
            match = pattern2.match(sha256_str)
            if match:
                d = match.groupdict()
                d['hash'] = 'sha256:{0}'.format(d['hash'])
                res1.update(d)
                res1['filepath'] = os.path.join(dest, res1['filename'])

            else:
                # only hash without file name
                # fall back to deriving the filename from the .sha256 name
                pattern2 = re.compile(r"^(?P<hash>[0-9a-f]+)$")
                match = pattern2.match(sha256_str)
                if match:
                    res1['hash'] = 'sha256:{0}'.format(match.groupdict()['hash'])
                    res1['filename'] = sha256_file[0:-len('.sha256')]
                    res1['filepath'] = os.path.join(dest, res1['filename'])
            res.append(res1)

    return res
   070701000000BA000081B400000000000000000000000163F87E3000000FEB000000000000000000000000000000000000002B00000000susemanager-sls/src/modules/kiwi_source.py    import salt.exceptions
import logging
import os
from tempfile import mkdtemp
try:
    from urllib.parse import urlparse
except ImportError:
     from urlparse import urlparse

log = logging.getLogger(__name__)

# valid prefixes taken from Docker-CE to be compatible
valid_git_prefixes = ['http://', 'https://', 'git://', 'github.com/', 'git@']
valid_url_prefixes = ['http://', 'https://']
valid_url_suffixes = ['.tar.gz', '.tar.xz', '.tar.bz2', '.tgz', '.tar']

def _isLocal(source):
  # a source is "local" when it is an existing directory on the minion
  return __salt__['file.directory_exists'](source)

def _isGit(source):
  '''
  Return True when `source` starts with one of the Docker-compatible
  git URL prefixes.
  '''
  return any(source.startswith(prefix) for prefix in valid_git_prefixes)

def _isTarball(source):
  '''
  Return True when `source` is an http(s) URL ending with one of the
  recognized tarball suffixes.
  '''
  if not any(source.startswith(prefix) for prefix in valid_url_prefixes):
    return False
  return any(source.endswith(suffix) for suffix in valid_url_suffixes)

def _prepareDestDir(dest):
  '''
  Ensure the target directory does not exist yet; raise SaltException
  if it does (a leftover would corrupt the prepared sources).
  '''
  if os.path.isdir(dest):
    raise salt.exceptions.SaltException('Working directory "{0}" exists before sources are prepared'.format(dest))

def _prepareLocal(source, dest):
  '''
  Make link from `source` to `dest`

  Returns `dest`, which then points at the local source directory.
  '''
  log.debug('Source is local directory')
  _prepareDestDir(dest)
  __salt__['file.symlink'](source, dest)
  return dest

def _prepareHTTP(source, dest):
  '''
  Download tarball from `source` and extract it into `dest`.

  Raises SaltException when either the download or the extraction
  state fails.
  '''
  log.debug('Source is HTTP')
  _prepareDestDir(dest)

  def _raise_on_failure(state_result):
    # state.single returns {state_id: {'result': bool, 'comment': str, ...}}
    for _, outcome in list(state_result.items()):
      if not outcome['result']:
        raise salt.exceptions.SaltException(outcome['comment'])

  filename = os.path.join(dest, source.split("/")[-1])
  _raise_on_failure(__salt__['state.single']('file.managed', filename, source=source, makedirs=True, skip_verify=True))
  _raise_on_failure(__salt__['state.single']('archive.extracted', name=dest, source=filename, skip_verify=True, overwrite=True))
  return dest

def _prepareGit(source, dest, root):
  '''
  Check out a git repository (optionally a revision/subdirectory of it)
  and link the requested tree to `dest`.

  source -- git URI, i.e. git@github.com/repo/#rev:sub
  dest   -- final symlink location
  root   -- build root that holds the temporary checkout
  '''
  _prepareDestDir(dest)

  # checkout git into temporary directory in our build root
  # this is needed if we are interested only in git subtree
  tmpdir = __salt__['temp.dir'](parent=root)

  # parse git uri - i.e. git@github.com/repo/#rev:sub
  # compatible with docker as per https://docs.docker.com/engine/reference/commandline/build/#git-repositories
  # (str.partition instead of bare try/except control flow)
  url, _, fragment = source.partition('#')
  rev, _, subdir = fragment.partition(':')
  subdir = subdir or None

  # omitted rev (no '#' fragment, or an empty one) means default 'master' branch
  if rev == '':
    rev = 'master'

  log.debug('GIT URL: {0}, Revision: {1}, subdir: {2}'.format(url, rev, subdir))
  __salt__['git.init'](tmpdir)
  __salt__['git.remote_set'](tmpdir, url)
  __salt__['git.fetch'](tmpdir)
  __salt__['git.checkout'](tmpdir, rev=rev)

  if subdir:
    if _isLocal(os.path.join(tmpdir, subdir)):
      __salt__['file.symlink'](os.path.join(tmpdir, subdir), dest)
    else:
      raise salt.exceptions.SaltException('Directory is not present in checked out source: {}'.format(subdir))
  else:
    __salt__['file.symlink'](tmpdir, dest)
  return dest

def prepare_source(source, root):
  '''
  Prepare source directory based on different source types.

  source -- string with either local directory path, remote http(s) archive or git repository
  root   -- local directory where to store processed source files

  For git repository following format is understood:
    [http[s]://|git://][user@]hostname/repository[#revision[:subdirectory]]
  '''
  dest = os.path.join(root, 'source')
  log.debug('Preparing build source for {0} to {1}'.format(source, dest))

  # dispatch on detected source type; order matters — a tarball URL
  # would also match the git http(s) prefixes
  if _isLocal(source):
    return _prepareLocal(source, dest)
  if _isTarball(source):
    return _prepareHTTP(source, dest)
  if _isGit(source):
    return _prepareGit(source, dest, root)
  raise salt.exceptions.SaltException('Unknown source format "{0}"'.format(source))
 070701000000BB000081B400000000000000000000000163F87E300000041F000000000000000000000000000000000000003000000000susemanager-sls/src/modules/mainframesysinfo.py   # -*- coding: utf-8 -*-
'''
s390 utility for Suse Manager

'''
from __future__ import absolute_import

import logging
import salt.utils
import salt.modules.cmdmod
from salt.exceptions import CommandExecutionError
import os

__salt__ = {
    'cmd.run_all': salt.modules.cmdmod.run_all,
}

log = logging.getLogger(__name__)


def __virtual__():
    '''
    Only works if /usr/bin/read_values is accessible
    '''
    return os.access('/usr/bin/read_values', os.X_OK) or \
        os.access('/proc/sysinfo', os.R_OK)


def read_values():
    '''
    Executes /usr/bin/read_values or if not available
    falls back to 'cat /proc/sysinfo'

    CLI Example:

    .. code-block:: bash

        salt '*' mainframesysinfo.read_values
    '''
    # prefer the s390 read_values tool; -s restricts output to system info
    if os.access('/usr/bin/read_values', os.X_OK):
        cmd = '/usr/bin/read_values -s'
    else:
        cmd = 'cat /proc/sysinfo'
    # quiet log level keeps the potentially large output out of minion logs
    result = __salt__['cmd.run_all'](cmd, output_loglevel='quiet')

    if result['retcode'] != 0:
        raise CommandExecutionError(result['stderr'])
    return result['stdout'] 070701000000BC000081B400000000000000000000000163F87E3000004891000000000000000000000000000000000000003100000000susemanager-sls/src/modules/mgr_caasp_manager.py  # -*- coding: utf-8 -*-
'''
SUSE Manager CaaSP Cluster Manager module for Salt

'''
from __future__ import absolute_import


import logging
import os
import subprocess

import salt.utils.stringutils
import salt.utils.timed_subprocess

try:
    from salt.utils.path import which
except ImportError:
    from salt.utils import which

from salt.utils.dictupdate import merge_list
from salt.exceptions import CommandExecutionError


log = logging.getLogger(__name__)

__virtualname__ = 'caasp'

DEFAULT_TIMEOUT = 1200


def __virtual__():
    '''
    This module requires that 'skuba' and 'kubectl' CLI tools are available.
    '''
    for tool in ('skuba', 'kubectl'):
        if not which(tool):
            return (False, '{} is not available in the minion'.format(tool))
    return __virtualname__


def _call_skuba(skuba_cluster_path,
                cmd_args,
                timeout=DEFAULT_TIMEOUT,
                **kwargs):
    '''
    Run "skuba <cmd_args>" with `skuba_cluster_path` as working
    directory and return the finished TimedProc (caller inspects
    .process.returncode / .stdout / .stderr).

    Raises CommandExecutionError on any unexpected failure, including
    hitting `timeout` (seconds).
    '''
    log.debug("Calling Skuba CLI: 'skuba {}' - Timeout: {}".format(cmd_args, timeout))
    try:
        skuba_proc = salt.utils.timed_subprocess.TimedProc(
            ["skuba"] + cmd_args.split(),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            timeout=timeout,
            cwd=skuba_cluster_path,
        )
        skuba_proc.run()
        return skuba_proc
    except Exception as exc:
        error_msg = "Unexpected error while calling skuba: {}".format(exc)
        log.error(error_msg)
        raise CommandExecutionError(error_msg)


def _call_kubectl(kubectl_config_path,
                  cmd_args,
                  timeout=DEFAULT_TIMEOUT,
                  **kwargs):
    '''
    Run "kubectl <cmd_args>" with KUBECONFIG pointing at admin.conf
    inside `kubectl_config_path` and return the finished TimedProc.

    Raises CommandExecutionError on any unexpected failure, including
    hitting `timeout` (seconds).
    '''
    # Copy the environment: `newenv = os.environ` would alias the live
    # mapping and leak KUBECONFIG into the whole minion process.
    newenv = os.environ.copy()
    newenv['KUBECONFIG'] = os.path.join(kubectl_config_path, 'admin.conf')

    log.debug("Calling kubectl CLI: 'kubectl {}' - KUBECONFIG: {} - Timeout: {}".format(cmd_args, newenv['KUBECONFIG'], timeout))
    try:
        kubectl_proc = salt.utils.timed_subprocess.TimedProc(
            ["kubectl"] + cmd_args.split(),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            timeout=timeout,
            cwd=kubectl_config_path,
            env=newenv,
        )
        kubectl_proc.run()
        return kubectl_proc
    except Exception as exc:
        error_msg = "Unexpected error while calling kubectl: {}".format(exc)
        log.error(error_msg)
        raise CommandExecutionError(error_msg)


def _sanitize_skuba_output_values(items):
    ret = []
    for i in items:
        if i.lower() == 'no':
            ret.append(False)
        elif i.lower() == 'yes':
            ret.append(True)
        elif i.lower() == '<none>':
            ret.append(None)
        else:
            ret.append(i)
    return ret


def list_nodes(skuba_cluster_path,
               timeout=DEFAULT_TIMEOUT,
               **kwargs):
    '''
    Return a dict keyed by node name describing the cluster nodes.

    Parses the tabular "skuba cluster status" output (header row gives
    the keys), then enriches each node with 'machine-id' and
    'internal-ips' obtained from "kubectl get nodes -o json".

    Raises CommandExecutionError when either CLI call fails or the
    skuba output cannot be parsed.
    '''
    skuba_proc = _call_skuba(skuba_cluster_path, "cluster status", timeout=timeout)
    if skuba_proc.process.returncode != 0 or skuba_proc.stderr:
        error_msg = "Unexpected error {} at skuba when listing nodes: {}".format(
                skuba_proc.process.returncode,
                salt.utils.stringutils.to_str(skuba_proc.stderr))
        log.error(error_msg)
        raise CommandExecutionError(error_msg)

    skuba_proc_lines = salt.utils.stringutils.to_str(skuba_proc.stdout).splitlines()

    ret = {}
    try:
        # The first line of skuba output are the headers
        # columns are separated by runs of 2+ spaces, hence split('  ')
        headers = [x.strip().lower() for x in skuba_proc_lines[0].split('  ') if x]
        name_idx = headers.index('name')
        headers.remove('name')
        for line in skuba_proc_lines[1:]:
            items = [x.strip() for x in line.split('  ') if x]
            # pop the name cell so remaining cells align with `headers`
            node_name = items.pop(name_idx)
            node_zip = zip(headers, _sanitize_skuba_output_values(items))
            ret[node_name] = dict(node_zip)
    except Exception as exc:
        error_msg = "Unexpected error while parsing skuba output: {}".format(exc)
        log.error(error_msg)
        raise CommandExecutionError(error_msg)

    # The following is a hack to enrich skuba result with the machine-id of every node
    # We need to query kubectl to retrieve the machine-id
    kubectl_proc = _call_kubectl(skuba_cluster_path, "get nodes -o json", timeout=timeout)
    if kubectl_proc.process.returncode != 0 or kubectl_proc.stderr:
        error_msg = "Unexpected error {} at kubectl when getting nodes: {}".format(
                kubectl_proc.process.returncode,
                salt.utils.stringutils.to_str(kubectl_proc.stderr))
        log.error(error_msg)
        raise CommandExecutionError(error_msg)

    # NOTE(review): salt.utils.yaml is used here but only
    # salt.utils.stringutils/timed_subprocess are imported above —
    # verify salt.utils.yaml is importable in the targeted salt version.
    kubectl_response = salt.utils.yaml.safe_load(kubectl_proc.stdout)

    for node in kubectl_response.get('items', []):
        node_name = node['metadata']['name']
        if node_name in ret.keys():
            ret[node_name]['machine-id'] = node['status']['nodeInfo']['machineID']
            ret[node_name]['internal-ips'] = list(map(lambda x: x['address'],
                                                  filter(lambda x: x['type'] == "InternalIP",
                                                         node['status']['addresses'])))
        else:
            # NOTE(review): node.metadata.name looks wrong for a dict
            # (would raise AttributeError) — should likely be
            # node['metadata']['name']; only hit on mismatched nodes.
            error_msg = "Node returned from Kubernetes API not known to skuba: {}".format(node.metadata.name)
            log.error(error_msg)

    return ret


def remove_node(skuba_cluster_path,
                node_name,
                drain_timeout=None,
                verbosity=None,
                timeout=DEFAULT_TIMEOUT,
                **kwargs):
    '''
    Run "skuba node remove <node_name>" for the cluster located at
    `skuba_cluster_path` and return stdout/stderr/success/retcode.
    '''
    cmd_args = "node remove {}".format(node_name)
    if drain_timeout:
        cmd_args += " --drain-timeout {}".format(drain_timeout)
    if verbosity:
        cmd_args += " --verbosity {}".format(verbosity)

    skuba_proc = _call_skuba(skuba_cluster_path, cmd_args, timeout=timeout)
    retcode = skuba_proc.process.returncode
    stderr_text = salt.utils.stringutils.to_str(skuba_proc.stderr)
    if retcode != 0:
        log.error("Unexpected error {} at skuba when removing a node: {}".format(
                retcode, stderr_text))

    return {
        'stdout': salt.utils.stringutils.to_str(skuba_proc.stdout),
        'stderr': stderr_text,
        'success': not retcode,
        'retcode': retcode,
    }


def add_node(skuba_cluster_path,
             node_name,
             role,
             target,
             ignore_preflight_errors=None,
             port=None,
             sudo=None,
             user=None,
             verbosity=None,
             timeout=DEFAULT_TIMEOUT,
             **kwargs):
    '''
    Run "skuba node join" to add `node_name` (reachable at `target`)
    with the given `role` and return stdout/stderr/success/retcode.
    '''
    cmd_args = "node join --role {} --target {} {}".format(role, target, node_name)

    # optional ssh/bootstrap flags
    if ignore_preflight_errors:
        cmd_args += " --ignore-preflight-errors {}".format(ignore_preflight_errors)
    if port:
        cmd_args += " --port {}".format(port)
    if sudo:
        cmd_args += " --sudo"
    if user:
        cmd_args += " --user {}".format(user)
    if verbosity:
        cmd_args += " --verbosity {}".format(verbosity)

    skuba_proc = _call_skuba(skuba_cluster_path, cmd_args, timeout=timeout)
    retcode = skuba_proc.process.returncode
    stderr_text = salt.utils.stringutils.to_str(skuba_proc.stderr)
    if retcode != 0:
        log.error("Unexpected error {} at skuba when adding a new node: {}".format(
                retcode, stderr_text))

    return {
        'stdout': salt.utils.stringutils.to_str(skuba_proc.stdout),
        'stderr': stderr_text,
        'success': not retcode,
        'retcode': retcode,
    }


def _upgrade_cluster_plan(skuba_cluster_path,
                          verbosity=None,
                          timeout=DEFAULT_TIMEOUT,
                          **kwargs):
    '''
    Run "skuba cluster upgrade plan" (dry run of the cluster upgrade)
    and return stdout/stderr/success/retcode.
    '''
    cmd_args = "cluster upgrade plan"
    if verbosity:
        cmd_args += " --verbosity {}".format(verbosity)

    skuba_proc = _call_skuba(skuba_cluster_path, cmd_args, timeout=timeout)
    retcode = skuba_proc.process.returncode
    stderr_text = salt.utils.stringutils.to_str(skuba_proc.stderr)
    if retcode != 0:
        log.error("Unexpected error {} at skuba when upgrading the cluster: {}".format(
                retcode, stderr_text))

    return {
        'stdout': salt.utils.stringutils.to_str(skuba_proc.stdout),
        'stderr': stderr_text,
        'success': not retcode,
        'retcode': retcode,
    }


def upgrade_cluster(skuba_cluster_path,
                    verbosity=None,
                    timeout=DEFAULT_TIMEOUT,
                    plan=False,
                    **kwargs):
    '''
    Upgrade the whole cluster (or, with plan=True, only show the plan).

    The real upgrade runs three stages: addons, then every node
    (masters first), then addons again. Per-stage results are returned
    under stage0/1/2 keys; 'success'/'retcode' summarize the run.
    Stage failures after stage0 do not abort remaining work.
    '''
    if plan:
        return _upgrade_cluster_plan(skuba_cluster_path=skuba_cluster_path,
                                     verbosity=verbosity,
                                     timeout=timeout,
                                     **kwargs)

    # Perform the cluster upgrade procedure.
    # 1. Upgrade addons
    # 2. Upgrade all nodes
    # 3. Upgrade addons
    ret = {
        'success' : True,
        'retcode' : 0,
        'stage0_upgrade_addons': {},
        'stage1_upgrade_nodes': {},
        'stage2_upgrade_addons': {},
    }

    ret['stage0_upgrade_addons'] = upgrade_addons(skuba_cluster_path=skuba_cluster_path,
                                                  verbosity=verbosity,
                                                  timeout=timeout,
                                                  plan=plan,
                                                  **kwargs)

    # abort early: node upgrades are pointless if addons failed
    if not ret['stage0_upgrade_addons']['success']:
        ret['success'] = False
        return ret

    nodes = list_nodes(skuba_cluster_path=skuba_cluster_path,
                       timeout=timeout,
                       **kwargs)

    # Ensure master nodes are upgraded first
    for node, _ in sorted(nodes.items(), key=lambda x: 0 if x[1].get('role') == 'master' else 1):
        # internal-ips comes from list_nodes' kubectl enrichment; a node
        # without one cannot be targeted by "node upgrade apply"
        if not nodes[node]['internal-ips']:
            log.error('No internal-ips defined for node: {}. Cannot proceed upgrading this node!'.format(node))
            continue

        ret['stage1_upgrade_nodes'][node] = upgrade_node(skuba_cluster_path=skuba_cluster_path,
                                                         target=nodes[node]['internal-ips'][0],
                                                         verbosity=verbosity,
                                                         timeout=timeout,
                                                         plan=plan,
                                                         **kwargs)

        # keep upgrading remaining nodes even if one fails
        if not ret['stage1_upgrade_nodes'][node]['success']:
            ret['success'] = False

    ret['stage2_upgrade_addons'] = upgrade_addons(skuba_cluster_path=skuba_cluster_path,
                                                  verbosity=verbosity,
                                                  timeout=timeout,
                                                  plan=plan,
                                                  **kwargs)

    if not ret['stage2_upgrade_addons']['success']:
        ret['success'] = False

    if not ret['success']:
        ret['retcode'] = 1

    return ret


def upgrade_addons(skuba_cluster_path,
                   verbosity=None,
                   timeout=DEFAULT_TIMEOUT,
                   plan=False,
                   **kwargs):
    '''
    Run "skuba addon upgrade" for the given cluster directory.

    With plan=True only the upgrade plan is shown; otherwise the upgrade
    is applied. Returns a dict with 'stdout', 'stderr', 'success' and
    'retcode' from the skuba invocation.
    '''
    subcommand = "plan" if plan else "apply"
    cmd_args = "addon upgrade {}".format(subcommand)
    if verbosity:
        cmd_args = "{} --verbosity {}".format(cmd_args, verbosity)

    skuba_proc = _call_skuba(skuba_cluster_path, cmd_args, timeout=timeout)
    returncode = skuba_proc.process.returncode
    if returncode != 0:
        # Log the failure but still return the result dict to the caller.
        log.error("Unexpected error {} at skuba when upgrading addons: {}".format(
            returncode,
            salt.utils.stringutils.to_str(skuba_proc.stderr)))

    return {
        'stdout': salt.utils.stringutils.to_str(skuba_proc.stdout),
        'stderr': salt.utils.stringutils.to_str(skuba_proc.stderr),
        'success': not returncode,
        'retcode': returncode,
    }


def upgrade_node(skuba_cluster_path,
                 node_name=None,
                 target=None,
                 port=None,
                 sudo=None,
                 user=None,
                 verbosity=None,
                 timeout=DEFAULT_TIMEOUT,
                 plan=False,
                 **kwargs):
    '''
    Run "skuba node upgrade" for a single cluster node.

    plan=True shows the upgrade plan for 'node_name'; otherwise the
    upgrade is applied to the node reachable at 'target'. Returns a dict
    with 'stdout', 'stderr', 'success' and 'retcode'.

    :raises CommandExecutionError: when the required argument for the
        selected mode is missing.
    '''
    if plan and not node_name:
        error_msg = "The 'node_name' argument is required if plan=True"
        log.error(error_msg)
        raise CommandExecutionError(error_msg)
    if not plan and not target:
        error_msg = "The 'target' argument is required without plan=True"
        log.error(error_msg)
        raise CommandExecutionError(error_msg)

    if plan:
        cmd_args = "node upgrade plan {}".format(node_name)
    else:
        cmd_args = "node upgrade apply --target {}".format(target)

    # Append optional CLI flags only when their value was provided.
    optional_flags = [
        " --port {}".format(port) if port else "",
        " --sudo" if sudo else "",
        " --user {}".format(user) if user else "",
        " --verbosity {}".format(verbosity) if verbosity else "",
    ]
    cmd_args += "".join(optional_flags)

    skuba_proc = _call_skuba(skuba_cluster_path, cmd_args, timeout=timeout)
    returncode = skuba_proc.process.returncode
    if returncode != 0:
        log.error("Unexpected error {} at skuba when upgrading node: {}".format(
            returncode,
            salt.utils.stringutils.to_str(skuba_proc.stderr)))

    return {
        'stdout': salt.utils.stringutils.to_str(skuba_proc.stdout),
        'stderr': salt.utils.stringutils.to_str(skuba_proc.stderr),
        'success': not returncode,
        'retcode': returncode,
    }


def cluster_init(cluster_name,
                 cluster_basedir,
                 target,
                 cloud_provider=None,
                 strict_capability_defaults=False,
                 verbosity=None,
                 timeout=DEFAULT_TIMEOUT,
                 **kwargs):
    '''
    Run "skuba cluster init" to create the cluster definition directory.

    'target' is passed as the control-plane endpoint. Returns a dict with
    'stdout', 'stderr', 'success' and 'retcode'.
    '''
    cmd_args = "cluster init --control-plane {} {}".format(target, cluster_name)

    if cloud_provider:
        cmd_args = "{} --cloud-provider {}".format(cmd_args, cloud_provider)
    if strict_capability_defaults:
        cmd_args = "{} --strict-capability-defaults".format(cmd_args)
    if verbosity:
        cmd_args = "{} --verbosity {}".format(cmd_args, verbosity)

    skuba_proc = _call_skuba(cluster_basedir, cmd_args, timeout=timeout)
    returncode = skuba_proc.process.returncode
    if returncode != 0:
        log.error("Unexpected error {} at skuba when initializing the cluster: {}".format(
            returncode,
            salt.utils.stringutils.to_str(skuba_proc.stderr)))

    return {
        'stdout': salt.utils.stringutils.to_str(skuba_proc.stdout),
        'stderr': salt.utils.stringutils.to_str(skuba_proc.stderr),
        'success': not returncode,
        'retcode': returncode,
    }


def master_bootstrap(node_name,
                     skuba_cluster_path,
                     target,
                     ignore_preflight_errors=None,
                     port=None,
                     sudo=None,
                     user=None,
                     verbosity=None,
                     timeout=DEFAULT_TIMEOUT,
                     **kwargs):
    '''
    Run "skuba node bootstrap" to bootstrap the first master node of a
    cluster. Returns a dict with 'stdout', 'stderr', 'success' and
    'retcode'.
    '''
    cmd_args = "node bootstrap --target {} {}".format(target, node_name)

    # Append optional CLI flags only when their value was provided.
    optional_flags = [
        " --ignore-preflight-errors {}".format(ignore_preflight_errors) if ignore_preflight_errors else "",
        " --port {}".format(port) if port else "",
        " --sudo" if sudo else "",
        " --user {}".format(user) if user else "",
        " --verbosity {}".format(verbosity) if verbosity else "",
    ]
    cmd_args += "".join(optional_flags)

    skuba_proc = _call_skuba(skuba_cluster_path, cmd_args, timeout=timeout)
    returncode = skuba_proc.process.returncode
    if returncode != 0:
        log.error("Unexpected error {} at skuba when bootstrapping the node: {}".format(
            returncode,
            salt.utils.stringutils.to_str(skuba_proc.stderr)))

    return {
        'stdout': salt.utils.stringutils.to_str(skuba_proc.stdout),
        'stderr': salt.utils.stringutils.to_str(skuba_proc.stderr),
        'success': not returncode,
        'retcode': returncode,
    }


def _join_return_dicts(ret1, ret2):
    '''
    Merge two skuba result dicts into one: 'stdout'/'stderr' are
    concatenated, while 'success' and 'retcode' keep only the value of
    the second (latest) result.
    '''
    merged = merge_list(ret1, ret2)

    # merge_list turns colliding keys into lists; flatten outputs back
    # into single strings.
    for key in ('stdout', 'stderr'):
        if isinstance(merged[key], list):
            merged[key] = ''.join(merged[key])

    # Only the most recent status matters after the merge.
    merged['success'] = merged['success'][1]
    merged['retcode'] = merged['retcode'][1]

    return merged

def create_cluster(cluster_name,
                   cluster_basedir,
                   first_node_name,
                   target,
                   cloud_provider=None,
                   strict_capability_defaults=False,
                   load_balancer=None,
                   verbosity=None,
                   timeout=DEFAULT_TIMEOUT,
                   **kwargs):
    '''
    Initialize a new skuba cluster definition and bootstrap its first
    master node. When 'load_balancer' is given, it is used as the
    control-plane endpoint instead of 'target'. Returns the combined
    result dict of both steps.
    '''
    control_plane = load_balancer or target
    init_ret = cluster_init(cluster_name=cluster_name,
                            cluster_basedir=cluster_basedir,
                            target=control_plane,
                            cloud_provider=cloud_provider,
                            strict_capability_defaults=strict_capability_defaults,
                            verbosity=verbosity,
                            timeout=timeout,
                            **kwargs)

    # Do not attempt the bootstrap when the init step already failed.
    if not init_ret['success']:
        return init_ret

    bootstrap_ret = master_bootstrap(node_name=first_node_name,
                                     skuba_cluster_path=os.path.join(cluster_basedir, cluster_name),
                                     target=target,
                                     verbosity=verbosity,
                                     timeout=timeout,
                                     **kwargs)
    return _join_return_dicts(init_ret, bootstrap_ret)
   070701000000BD000081B400000000000000000000000163F87E30000025ED000000000000000000000000000000000000002F00000000susemanager-sls/src/modules/mgractionchains.py    # -*- coding: utf-8 -*-
'''
SUSE Manager Action Chains module for Salt

'''
from __future__ import absolute_import

import logging
import os
import sys
import salt.config
import salt.syspaths
import yaml
from salt.utils.yamlloader import SaltYamlSafeLoader

# Prevent issues due 'salt.utils.fopen' deprecation
try:
    from salt.utils import fopen
except:
    from salt.utils.files import fopen

from salt.exceptions import CommandExecutionError

log = logging.getLogger(__name__)

__virtualname__ = 'mgractionchains'

SALT_ACTIONCHAIN_BASE = 'actionchains'


def __virtual__():
    '''
    This module is always enabled while 'state.sls' is available.
    '''
    if 'state.sls' in __salt__:
        return __virtualname__
    return (False, 'state.sls is not available')

def _calculate_sls(actionchain_id, machine_id, chunk):
    '''
    Build the SLS reference for one action chain chunk:
    '<base>.actionchain_<actionchain_id>_<machine_id>_<chunk>'.
    '''
    return '{0}.actionchain_{1}_{2}_{3}'.format(
        SALT_ACTIONCHAIN_BASE, actionchain_id, machine_id, chunk)

def _get_ac_storage_filenamepath():
    '''
    Calculate the filepath to the '_mgractionchains.conf' which is placed
    by default in /etc/salt/minion.d/
    '''
    # Prefer the explicit conf_dir; fall back to the directory of the
    # minion config file, then to the compiled-in default.
    config_dir = __opts__.get('conf_dir', None)
    if config_dir is None and 'conf_file' in __opts__:
        config_dir = os.path.dirname(__opts__['conf_file'])
    if config_dir is None:
        config_dir = salt.syspaths.CONFIG_DIR

    default_include = __opts__.get(
        'default_include',
        salt.config.DEFAULT_MINION_OPTS['default_include'])
    minion_d_dir = os.path.join(config_dir, os.path.dirname(default_include))

    return os.path.join(minion_d_dir, '_mgractionchains.conf')

def check_reboot_required(target_sls):
    '''
    Used on transactional update systems.
    Check whether the sls file contains the 'reboot_required' parameter in
    'schedule_next_chunk'. If it exists and is set to True, the system is
    rebooted when the sls file execution is completed.
    :param target_sls: sls filename
    :return: True if the system requires a reboot at the end of the transaction
    '''
    sls_file_on_minion = __salt__['cp.cache_file']('{0}{1}.sls'.format('salt://actionchains/', target_sls.replace('actionchains.','')))
    current_state_info = _read_sls_file(sls_file_on_minion)

    # 'schedule_next_chunk' describes how to restart the action chain after a
    # reboot, so it is only present when some action performs a reboot or a
    # salt upgrade. Without it no reboot can be required.
    if not current_state_info or 'schedule_next_chunk' not in current_state_info:
        return False
    if 'mgrcompat.module_run' not in current_state_info['schedule_next_chunk']:
        log.error("Cannot check if reboot is needed as \"schedule_next_chunk\" is not containing expected attributes.")
        return False

    for entry in current_state_info['schedule_next_chunk']['mgrcompat.module_run']:
        if 'reboot_required' in entry:
            return entry["reboot_required"]
    return False

def _read_next_ac_chunk(clear=True):
    '''
    Read the content of '_mgractionchains.conf' and return the parsed YAML.
    When 'clear' is True (default) the file is removed after a successful
    read; returns None when the file does not exist.
    '''
    f_storage_filename = _get_ac_storage_filenamepath()
    parsed = _read_sls_file(f_storage_filename)
    if parsed is None:
        return None
    if clear:
        os.remove(f_storage_filename)
    return parsed

def _read_sls_file(filename):
    '''
    Parse 'filename' as YAML and return the result, or None when the file
    does not exist.

    :raises CommandExecutionError: on read or YAML parse failures.
    '''
    if not os.path.isfile(filename):
        log.debug("File {0} does not exists".format(filename))
        return None
    try:
        with fopen(filename, "r") as f:
            return yaml.load(f.read(), Loader=SaltYamlSafeLoader)
    except (IOError, yaml.scanner.ScannerError) as exc:
        err_str = "Error processing YAML from '{0}': {1}".format(filename, exc)
        log.error(err_str)
        raise CommandExecutionError(err_str)

def _add_boot_time(next_chunk, prefix):
    '''
    Store the current boot time (from status.uptime, ISO format) in
    next_chunk under the key '<prefix>_boot_time'.
    '''
    since_iso = __salt__["status.uptime"]()["since_iso"]
    next_chunk["{0}_boot_time".format(prefix)] = since_iso

def _persist_next_ac_chunk(next_chunk):
    '''
    Persist next_chunk to execute as YAML in '_mgractionchains.conf'

    :param next_chunk: dict describing the next action chain chunk
    :raises CommandExecutionError: when the file cannot be written or the
        data cannot be serialized as YAML
    '''
    _add_boot_time(next_chunk, "persist")
    f_storage_filename = _get_ac_storage_filenamepath()
    try:
        f_storage_dir = os.path.dirname(f_storage_filename)  # fix: stray ';' removed
        if not os.path.exists(f_storage_dir):
            os.makedirs(f_storage_dir)
        with fopen(f_storage_filename, "w") as f_storage:
            f_storage.write(yaml.dump(next_chunk))
    except (IOError, yaml.YAMLError) as exc:
        # yaml.YAMLError also covers serialization errors raised by
        # yaml.dump(); the previous yaml.scanner.ScannerError is a
        # parse-time subclass that can never occur on this write path.
        err_str = "Error writing YAML from '{0}': {1}".format(f_storage_filename, exc)
        log.error(err_str)
        raise CommandExecutionError(err_str)

def start(actionchain_id):
    '''
    Start the execution of the given SUSE Manager Action Chain

    actionchain_id
        The SUSE Manager Actionchain ID to execute on this minion.

    CLI Example:

    .. code-block:: bash

        salt '*' mgractionchains.start 123
    '''
    # Refuse to start while another action chain is still pending resume.
    if os.path.isfile(_get_ac_storage_filenamepath()):
        msg = "Action Chain '{0}' cannot be started. There is already another " \
              "Action Chain being executed. Please check file '{1}'".format(
                actionchain_id, _get_ac_storage_filenamepath())
        log.error(msg)
        raise CommandExecutionError(msg)
    target_sls = _calculate_sls(actionchain_id, __grains__['machine_id'], 1)
    log.debug("Starting execution of SUSE Manager Action Chains ID "
              "'{0}' -> Target SLS: {1}".format(actionchain_id, target_sls))
    try:
        # Make sure custom states and modules referenced by the SLS are
        # synced to the minion before applying it.
        __salt__['saltutil.sync_states']()
        __salt__['saltutil.sync_modules']()
    except Exception as exc:
        # Fix: the caught exception was previously discarded; include it
        # in the log so sync failures can actually be diagnosed.
        log.error("There was an error while syncing custom states and "
                  "execution modules: {0}".format(exc))

    transactional_update = __grains__.get("transactional")
    reboot_required = False
    inside_transaction = False
    if transactional_update:
        reboot_required = check_reboot_required(target_sls)
        inside_transaction = os.environ.get("TRANSACTIONAL_UPDATE")

    # On transactional systems (and when not already inside a transaction)
    # the SLS must be applied through transactional_update; the transaction
    # is activated (reboot) only when the chain requires it.
    if transactional_update and not inside_transaction:
        ret = __salt__['transactional_update.sls'](target_sls, queue=True, activate_transaction=reboot_required)
    else:
        ret = __salt__['state.sls'](target_sls, queue=True)

    # state.sls returns a list (of error strings) on failure.
    if isinstance(ret, list):
        raise CommandExecutionError(ret)
    return ret

def next(actionchain_id, chunk, next_action_id=None, current_action_id=None,  ssh_extra_filerefs=None, reboot_required=False):
    '''
    Persist the next Action Chain chunk to be executed by the 'resume' method.

    next_chunk
        The next target SLS to be executed.

    CLI Example:

    .. code-block:: bash

        salt '*' mgractionchains.next actionchains.actionchain_123_machineid_2
    '''
    yaml_dict = {
        'next_chunk': _calculate_sls(actionchain_id, __grains__['machine_id'], chunk),
        'actionchain_id': actionchain_id,
    }
    # Optional attributes are only stored when they carry a truthy value.
    optional = (('next_action_id', next_action_id),
                ('current_action_id', current_action_id),
                ('ssh_extra_filerefs', ssh_extra_filerefs),
                ('reboot_required', reboot_required))
    for key, value in optional:
        if value:
            yaml_dict[key] = value
    _persist_next_ac_chunk(yaml_dict)
    return yaml_dict

def get_pending_resume():
    '''
    Get information about any pending action chain chunk execution.
    '''
    # Read without clearing so the pending chunk stays persisted.
    pending = _read_next_ac_chunk(False)
    if not pending:
        return {}
    _add_boot_time(pending, "current")
    return pending



def resume():
    '''
    Continue the execution of a SUSE Manager Action Chain.
    This will trigger the execution of the next chunk SLS file stored on '_mgractionchains.conf'

    This method is called by the Salt Reactor as a response to the 'minion/start/event'.
    '''
    ac_resume_info = _read_next_ac_chunk()
    if not ac_resume_info:
        return {}
    # Fix: use isinstance() instead of 'type(x) != dict' so dict
    # subclasses (e.g. ordered mappings from YAML loaders) are accepted.
    if not isinstance(ac_resume_info, dict):
        err_str = "Not able to resume Action Chain execution! Malformed " \
                  "'_mgractionchains.conf' found: {0}".format(ac_resume_info)
        log.error(err_str)
        raise CommandExecutionError(err_str)
    next_chunk = ac_resume_info.get('next_chunk')
    log.debug("Resuming execution of SUSE Manager Action Chain -> Target SLS: "
              "{0}".format(next_chunk))

    transactional_update = __grains__.get("transactional")
    reboot_required = False
    inside_transaction = False
    if transactional_update:
        reboot_required = ac_resume_info.get('reboot_required')
        inside_transaction = os.environ.get("TRANSACTIONAL_UPDATE")

    # On transactional systems (outside a running transaction) apply the
    # SLS through transactional_update; only activate (reboot into) the
    # new transaction when the chain requires it.
    if transactional_update and not inside_transaction:
        ret = __salt__['transactional_update.sls'](next_chunk, queue=True, activate_transaction=reboot_required)
    else:
        ret = __salt__['state.sls'](next_chunk, queue=True)

    # state.sls returns a list (of error strings) on failure.
    if isinstance(ret, list):
        raise CommandExecutionError(ret)
    return ret

def clean(actionchain_id=None, current_action_id=None, reboot_required=None):
    '''
    Clean execution of an Action Chain by removing '_mgractionchains.conf'.
    '''
    # Reading with the default clear=True removes the persisted file.
    _read_next_ac_chunk()
    result = {'success': True}
    if actionchain_id:
        result['actionchain_id'] = actionchain_id
    if current_action_id:
        result['current_action_id'] = current_action_id
    if reboot_required:
        result['reboot_required'] = reboot_required
    return result
   070701000000BE000081B400000000000000000000000163F87E30000005F8000000000000000000000000000000000000002B00000000susemanager-sls/src/modules/mgrclusters.py    # -*- coding: utf-8 -*-
'''
SUSE Manager Clusters Management module for Salt

'''
from __future__ import absolute_import

from salt.exceptions import CommandExecutionError
import logging

log = logging.getLogger(__name__)

__virtualname__ = 'mgrclusters'


def __virtual__():
    '''
    This module is always enabled while 'cmd.run' is available.
    '''
    if 'cmd.run' in __salt__:
        return __virtualname__
    return (False, 'cmd.run is not available')


def _get_provider_fun(provider_module, fun):
    '''
    Resolve function 'fun' from the given cluster provider module.

    :raises CommandExecutionError: when the provider module is empty or
        the function is not available in __salt__.
    '''
    fun_key = "{}.{}".format(provider_module, fun)
    if not provider_module:
        raise CommandExecutionError("You must specify a valid cluster provider module: {}".format(provider_module))
    if fun_key in __salt__:
        return __salt__[fun_key]
    raise CommandExecutionError("The selected cluster provider cannot be found: {}".format(provider_module))


def list_nodes(provider_module, params):
    '''Delegate 'list_nodes' to the selected cluster provider module.'''
    return _get_provider_fun(provider_module, 'list_nodes')(**params)


def add_node(provider_module, params):
    '''Delegate 'add_node' to the selected cluster provider module.'''
    return _get_provider_fun(provider_module, 'add_node')(**params)


def remove_node(provider_module, params):
    '''Delegate 'remove_node' to the selected cluster provider module.'''
    return _get_provider_fun(provider_module, 'remove_node')(**params)


def upgrade_cluster(provider_module, params):
    '''Delegate 'upgrade_cluster' to the selected cluster provider module.'''
    return _get_provider_fun(provider_module, 'upgrade_cluster')(**params)


def create_cluster(provider_module, params):
    '''Delegate 'create_cluster' to the selected cluster provider module.'''
    return _get_provider_fun(provider_module, 'create_cluster')(**params)
070701000000BF000081B400000000000000000000000163F87E3000000B17000000000000000000000000000000000000002600000000susemanager-sls/src/modules/mgrnet.py """
Module for gathering DNS FQDNs
"""

import logging
import re
import time
from concurrent.futures import ThreadPoolExecutor, as_completed

import salt.utils.network

log = logging.getLogger(__name__)


def __virtual__():
    """
    Only works on POSIX-like systems having 'host' or 'nslookup' available
    """
    has_tool = __utils__["path.which"]("host") or __utils__["path.which"]("nslookup")
    if has_tool:
        return True
    return (False, "Neither 'host' nor 'nslookup' is available on the system")


def dns_fqdns():
    """
    Return all known DNS FQDNs for the system by enumerating all interfaces and
    then trying to reverse resolve them with native DNS tools
    """
    # Provides:
    # dns_fqdns

    fqdns = set()
    cmd_run_all_func = __salt__["cmd.run_all"]
    # Prefer 'host'; fall back to 'nslookup'. Each has a different reverse
    # lookup output format, hence the tool-specific regex.
    if __utils__["path.which"]("host"):
        cmd = "host"
        cmd_ret_regex = re.compile(r".* domain name pointer (.*)\.$")
    elif __utils__["path.which"]("nslookup"):
        cmd = "nslookup"
        cmd_ret_regex = re.compile(r".*\tname = (.*)\.$")
    else:
        log.error("Neither 'host' nor 'nslookup' is available on the system")
        return {"dns_fqdns": []}

    def _lookup_dns_fqdn(ip):
        # Reverse-resolve a single IP; returns a (possibly empty) list of FQDNs.
        try:
            ret = cmd_run_all_func([cmd, ip], ignore_retcode=True)
        except Exception as e:
            log.error("Error while trying to use '%s' to resolve '%s': %s", cmd, ip, e)
            # Fix: previously execution fell through here and the following
            # access to 'ret' raised UnboundLocalError.
            return []
        if ret["retcode"] != 0:
            log.debug("Unable to resolve '%s' using '%s': %s", ip, cmd, ret)
            return []
        fqdns = []
        for line in ret["stdout"].split("\n"):
            match = cmd_ret_regex.match(line)
            if match:
                fqdns.append(match.group(1))
        return fqdns

    start = time.time()

    addresses = salt.utils.network.ip_addrs(
        include_loopback=False, interface_data=salt.utils.network._get_interfaces()
    )
    addresses.extend(
        salt.utils.network.ip_addrs6(
            include_loopback=False, interface_data=salt.utils.network._get_interfaces()
        )
    )

    try:
        # Create a ThreadPoolExecutor to process the underlying calls
        # to resolve DNS FQDNs in parallel.
        with ThreadPoolExecutor(max_workers=8) as executor:
            futures = dict((executor.submit(_lookup_dns_fqdn, ip), ip) for ip in addresses)
            for future in as_completed(futures):
                resolved = future.result()
                if resolved:
                    fqdns.update(resolved)
    except Exception as exc:  # pylint: disable=broad-except
        log.error(
            "Exception while running ThreadPoolExecutor for FQDNs resolution: %s",
            exc,
        )

    elapsed = time.time() - start
    log.debug("Elapsed time getting DNS FQDNs: %s seconds", elapsed)

    return {"dns_fqdns": sorted(list(fqdns))}
 070701000000C0000081B400000000000000000000000163F87E3000000977000000000000000000000000000000000000002900000000susemanager-sls/src/modules/ssh_agent.py  import logging
import subprocess
import salt.utils.timed_subprocess
from salt.exceptions import CommandExecutionError
try:
    from salt.utils.path import which_bin as _which_bin
except ImportError:
    from salt.utils import which_bin as _which_bin

log = logging.getLogger(__name__)

__virtualname__ = 'ssh_agent'

__ssh_agent = '/usr/bin/ssh-agent'
__ssh_add = '/usr/bin/ssh-add'

def __virtual__():
    '''
    This module is always enabled while 'ssh-agent' is available.
    '''
    if _which_bin(['ssh-agent']):
        return __virtualname__
    return (False, 'ssh-agent is not available')

def __call_ssh_tool(ssh_tool, cmd_args = "", **kwargs):
    '''
    Run an ssh tool (ssh-agent / ssh-add) with the given argument string
    and return the finished TimedProc object.

    :param ssh_tool: path of the binary to execute
    :param cmd_args: argument string, split on whitespace
    :param kwargs: ignored (accepted for Salt CLI compatibility)
    :raises CommandExecutionError: when the tool cannot be launched or
        exits with a non-zero return code
    '''
    log.debug("Calling ssh-agent: '{} {}'".format(ssh_tool, cmd_args))
    try:
        ssh_tool_proc = salt.utils.timed_subprocess.TimedProc(
            [ssh_tool] + cmd_args.split(),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        ssh_tool_proc.run()
    except Exception as exc:
        error_msg = "Unexpected error while calling {}: {}".format(ssh_tool, exc)
        log.error(error_msg)
        raise CommandExecutionError(error_msg)

    if ssh_tool_proc.process.returncode != 0:
        # NOTE(review): salt.utils.stringutils is used here but this module
        # only imports salt.utils.timed_subprocess — confirm the attribute
        # is reachable, otherwise this error path raises AttributeError.
        error_msg = "Unexpected error {} when calling {} {}: {} {}".format(
                ssh_tool_proc.process.returncode,
                ssh_tool,
                cmd_args,
                salt.utils.stringutils.to_str(ssh_tool_proc.stdout),
                salt.utils.stringutils.to_str(ssh_tool_proc.stderr))
        log.error(error_msg)
        raise CommandExecutionError(error_msg)

    return ssh_tool_proc


def start_agent(**kwargs):
    '''
    Start a new ssh-agent process and export the environment variables it
    prints (lines starting with 'SSH') into the minion environment.
    Returns the dict of exported variables.
    '''
    result = __call_ssh_tool(__ssh_agent)
    stdout = salt.utils.stringutils.to_str(result.stdout)

    variables = {}
    for line in stdout.splitlines():
        if not line.startswith('SSH'):
            continue
        # Each relevant line looks like 'KEY=value; ...'; keep only the
        # assignment before the first ';'.
        assignment = line.split(';')[0]
        key, val = assignment.strip().split("=", 1)
        variables[key] = val

    __salt__['environ.setenv'](variables)
    return variables


def list_keys(**kwargs):
    '''
    Return the output of 'ssh-add -l' (identities currently held by the agent).
    '''
    proc = __call_ssh_tool(__ssh_add, "-l")
    return salt.utils.stringutils.to_str(proc.stdout)


def add_key(ssh_key_file, **kwargs):
    '''
    Add the given private key file to the running ssh-agent via 'ssh-add'.
    Raises CommandExecutionError (from __call_ssh_tool) on failure.
    '''
    __call_ssh_tool(__ssh_add, ssh_key_file)
    return True

def kill(**kwargs):
    '''
    Kill the running ssh-agent ('ssh-agent -k').
    '''
    __call_ssh_tool(__ssh_agent, "-k")
    return True
 070701000000C1000081B400000000000000000000000163F87E3000001333000000000000000000000000000000000000002800000000susemanager-sls/src/modules/sumautil.py   # -*- coding: utf-8 -*-
'''
Utility module for Suse Manager

'''
from __future__ import absolute_import

import logging
import socket
import os
import re
import time
import salt.utils
import salt.modules.cmdmod
from salt.exceptions import CommandExecutionError

__salt__ = {
    'cmd.run_all': salt.modules.cmdmod.run_all,
}

log = logging.getLogger(__name__)

__virtualname__ = 'sumautil'

SYSFS_NET_PATH = '/sys/class/net'


def __virtual__():
    '''
    Only run on Linux systems
    '''
    if __grains__['kernel'] == 'Linux':
        return __virtualname__
    return False


def cat(path):
    '''
    Cat the specified file.

    CLI Example:

    .. code-block:: bash

        salt '*' sumautil.cat /tmp/file
    '''
    result = __salt__['cmd.run_all']('cat %s' % path, output_loglevel='quiet')

    if result['retcode'] != 0:
        return {'retcode': 1, 'stderr': result['stderr']}
    return {'retcode': 0, 'stdout': result['stdout']}


def primary_ips():
    '''
    Get the source IPs that the minion uses to connect to the master.
    Returns the IPv4 and IPv6 address (if available).

    CLI Example:

    .. code-block:: bash

        salt '*' sumautil.primary_ip
    '''

    get_master_ip = lambda family, host: socket.getaddrinfo(host, 0, family)[0][-1][0]

    master = __opts__.get('master', '')
    log.debug('Using master: {0}'.format(str(master)))

    ret = dict()
    for sock_family, sock_descr in list({socket.AF_INET: 'IPv4', socket.AF_INET6: 'IPv6'}.items()):
        try:
            ret['{0}'.format(sock_descr)] = __salt__['network.get_route'](get_master_ip(sock_family, master))
            # Fix: this debug line previously read the non-existent key
            # '{0} source', raising KeyError inside the try and logging a
            # misleading "... is not available?" message even on success.
            log.debug("network.get_route({0}): ".format(ret['{0}'.format(sock_descr)]))
        except Exception as err:
            log.debug('{0} is not available? {1}'.format(sock_descr, err))

    return ret


def get_net_module(iface):
    '''
    Returns the kernel module used for the give interface
    or None if the module could not be determined of if the
    interface name is wrong.
    Uses '/sys/class/net' to find out the module.

    CLI Example:

    .. code-block:: bash

        salt '*' sumautil.get_net_module eth0
    '''
    driver_link = os.path.join(SYSFS_NET_PATH, iface, 'device/driver')
    if not os.path.exists(driver_link):
        return None
    # The driver symlink's basename is the kernel module name.
    return os.path.split(os.readlink(driver_link))[-1] or None


def get_net_modules():
    '''
    Returns a dictionary of all network interfaces and their
    corresponding kernel module (if it could be determined).

    CLI Example:

    .. code-block:: bash

        salt '*' sumautil.get_net_modules
    '''
    drivers = dict()
    for devdir in os.listdir(SYSFS_NET_PATH):
        try:
            drivers[devdir] = get_net_module(devdir)
        except OSError:
            # Fix: the handler previously used 'except OSError as devdir',
            # shadowing the device name so the warning printed the exception
            # object instead of the interface; exc_info still logs the error.
            log.warning("An error occurred getting net driver for {0}".format(devdir), exc_info=True)

    return drivers or None

def get_kernel_live_version():
    '''
    Returns the patch version of live patching if it is active,
    otherwise None

    CLI Example:

    .. code-block:: bash

        salt '*' sumautil.get_kernel_live_version
    '''
    live_patch = _klp()
    if not live_patch:
        log.debug("No kernel live patch is active")
    return live_patch

def _klp():
    '''
    klp to identify the current kernel live patch

    Locates the 'klp' (or legacy 'kgr') tool, waits until patching has
    settled, then parses '<tool> -v patches' output.

    :return: {'mgr_kernel_live_version': <patchname>} when an active patch
             is found, otherwise None (implicit)
    '''
    # get 'kgr' for versions prior to SLE 15
    try:
        from salt.utils.path import which_bin as _which_bin
    except:
        from salt.utils import which_bin as _which_bin

    klp = _which_bin(['klp', 'kgr'])
    patchname = None
    if klp is not None:
        try:
            # loop until patching is finished
            # (poll at most 10 times, one second apart, for 'ready')
            for i in range(10):
                stat = __salt__['cmd.run_all']('{0} status'.format(klp), output_loglevel='quiet')
                log.debug("klp status: {0}".format(stat['stdout']))
                if stat['stdout'].strip().splitlines()[0] == 'ready':
                    break
                time.sleep(1)
            # A patch counts as applied when its 'active' count is > 0.
            re_active = re.compile(r"^\s+active:\s*(\d+)$")
            ret = __salt__['cmd.run_all']('{0} -v patches'.format(klp), output_loglevel='quiet')
            log.debug("klp patches: {0}".format(ret['stdout']))
            if ret['retcode'] == 0:
                for line in ret['stdout'].strip().splitlines():
                    if line.startswith('#'):
                        continue

                    # The patch-name line precedes its 'active:' line, so the
                    # last seen name is returned once an active count shows up.
                    match_active = re_active.match(line)
                    if match_active and int(match_active.group(1)) > 0:
                        return {'mgr_kernel_live_version': patchname }
                    elif line.startswith('kgraft') or line.startswith('livepatch'):
                        # kgr patches have prefix 'kgraft', whereas klp patches start with 'livepatch'
                        patchname = line.strip()

        except Exception as error:
            log.error("klp: {0}".format(str(error)))
 070701000000C2000081B400000000000000000000000163F87E3000000B37000000000000000000000000000000000000002600000000susemanager-sls/src/modules/udevdb.py # -*- coding: utf-8 -*-
'''
Export udev database

'''
from __future__ import absolute_import

import logging
import salt.utils
import salt.modules.cmdmod
from salt.exceptions import CommandExecutionError
try:
    from salt.utils.path import which_bin as _which_bin
except ImportError:
    from salt.utils import which_bin as _which_bin

__salt__ = {
    'cmd.run_all': salt.modules.cmdmod.run_all,
}

log = logging.getLogger(__name__)


def __virtual__():
    '''
    Only work when udevadm is installed.
    '''
    has_udevadm = _which_bin(['udevadm']) is not None
    return has_udevadm


def exportdb():
    '''
    Extract all info delivered by udevadm

    Parses 'udevadm info --export-db' output into a list of device dicts:
    each record maps the single-letter udev prefix ('P', 'N', 'E', ...)
    to its data; 'E' entries become a nested key/value dict with numeric
    values converted to int/float where possible.

    :raises CommandExecutionError: when udevadm exits with an error

    CLI Example:

    .. code-block:: bash

        salt '*' udev.info /dev/sda
        salt '*' udev.info /sys/class/net/eth0
    '''

    cmd = 'udevadm info --export-db'
    udev_result = __salt__['cmd.run_all'](cmd, output_loglevel='quiet')

    if udev_result['retcode'] != 0:
        raise CommandExecutionError(udev_result['stderr'])

    devices = []
    dev = {}
    # Records in the export are separated by blank lines; each data line
    # is '<prefix>: <payload>'.
    for line in (line.strip() for line in udev_result['stdout'].splitlines()):
        if line:
            line = line.split(':', 1)
            if len(line) != 2:
                continue
            query, data = line
            if query == 'E':
                # 'E' lines are environment-style KEY=value pairs.
                if query not in dev:
                    dev[query] = {}
                key, val = data.strip().split('=', 1)

                try:
                    val = int(val)
                except ValueError:
                    try:
                        val = float(val)
                    except ValueError:
                        pass  # Quiet, this is not a number.

                dev[query][key] = val
            else:
                # Other prefixes may repeat, so collect them as lists.
                if query not in dev:
                    dev[query] = []
                dev[query].append(data.strip())
        else:
            # Blank line: finish the current record.
            if dev:
                normalize(dev)
                add_scsi_info(dev)
                devices.append(dev)
                dev = {}
    # Flush the trailing record when the output does not end with a blank line.
    if dev:
        normalize(dev)
        add_scsi_info(dev)
        devices.append(dev)

    return devices


def normalize(dev):
    '''
    Replace list with only one element to the value of the element.

    :param dev: device record dict (mutated in place)
    :return: the same dict, with single-element lists unwrapped
    '''
    for key in list(dev):
        value = dev[key]
        if isinstance(value, list) and len(value) == 1:
            dev[key] = value[0]

    return dev


def add_scsi_info(dev):
    '''
    Attach the SCSI type read from sysfs to a scsi_device entry,
    under the 'X-Mgr' key.
    '''
    env = dev.get('E')
    if not (env and env.get('SUBSYSTEM') == 'scsi' and env.get('DEVTYPE') == 'scsi_device'):
        return

    sysfs_path = dev['P']
    scsi_type = __salt__['cmd.run_all']('cat /sys/{0}/type'.format(sysfs_path), output_loglevel='quiet')

    if scsi_type['retcode'] != 0:
        raise CommandExecutionError(scsi_type['stderr'])

    dev['X-Mgr'] = {'SCSI_SYS_TYPE': scsi_type['stdout']}
 070701000000C3000081B400000000000000000000000163F87E300000FF96000000000000000000000000000000000000002C00000000susemanager-sls/src/modules/uyuni_config.py   # coding: utf-8
from typing import Any, Dict, List, Optional, Union, Tuple
import ssl
import xmlrpc.client  # type: ignore
import logging

import os
import salt.config
from salt.utils.minions import CkMinions
import datetime

AUTHENTICATION_ERROR = 2950

log = logging.getLogger(__name__)

__pillar__: Dict[str, Any] = {}
__context__: Dict[str, Any] = {}
__virtualname__: str = "uyuni"


class UyuniUsersException(Exception):
    """
    Raised on Uyuni user-management and XML-RPC authentication errors.
    """


class UyuniChannelsException(Exception):
    """
    Raised on Uyuni channel-management errors.
    """


class RPCClient:
    """
    XML-RPC client for the Uyuni API, with session-token caching in
    ``__context__`` and a single automatic retry on token expiry.
    """

    def __init__(self, user: str = None, password: str = None, url: str = "https://localhost/rpc/api"):
        """
        XML-RPC client interface.

        :param user: username for the XML-RPC API endpoints
        :param password: password credentials for the XML-RPC API endpoints
        :param url: URL of the remote host

        :raises UyuniUsersException: if no credentials are passed and none are
            found in the ``uyuni:xmlrpc`` pillar data
        """
        # The API is usually reached on localhost with a self-signed
        # certificate, so hostname and certificate checks are disabled.
        ctx: ssl.SSLContext = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        self.conn = xmlrpc.client.ServerProxy(url, context=ctx, use_datetime=True, use_builtin_types=True)
        # Declare once to avoid the duplicate, conflicting per-branch
        # annotations the original had (a mypy error).
        self._user: str
        self._password: str
        if user is None or password is None:
            # if user or password not set, fall back to the default user
            # defined in pillar data
            if "xmlrpc" in (__pillar__ or {}).get("uyuni", {}):
                rpc_conf = (__pillar__ or {})["uyuni"]["xmlrpc"] or {}
                self._user = rpc_conf.get("user", "")
                self._password = rpc_conf.get("password", "")
            else:
                raise UyuniUsersException("Unable to find Pillar configuration for Uyuni XML-RPC API")
        else:
            self._user = user
            self._password = password

        self.token: Optional[str] = None

    def get_user(self):
        """
        Return the username this client authenticates with.
        """
        return self._user

    def get_token(self, refresh: bool = False) -> Optional[str]:
        """
        Authenticate.
        If an authentication token is present in __context__ it will be returned.
        Otherwise get a new authentication token from the XML-RPC API.
        If refresh is True, get a new token from the API regardless of prior status.

        :param refresh: force token refresh, discarding any cached value
        :return: authentication token
        """
        if self.token is None or refresh:
            try:
                auth_token_key = "uyuni.auth_token_" + self._user
                if auth_token_key not in __context__ or refresh:
                    __context__[auth_token_key] = self.conn.auth.login(self._user, self._password)
            except Exception as exc:
                log.error("Unable to login to the Uyuni server: %s", exc)
                raise exc
            self.token = __context__[auth_token_key]
        return self.token

    def __call__(self, method: str, *args, **kwargs) -> Any:
        """
        Call an XML-RPC method with the session token prepended to ``args``.

        :param method: dotted API method name, e.g. ``user.getDetails``
        :return: whatever the remote API call returns

        :raises UyuniUsersException: when no authentication token could be obtained
        """
        self.get_token()
        if self.token is not None:
            try:
                log.debug("Calling RPC method %s", method)
                return getattr(self.conn, method)(*((self.token,) + args))
            except Exception as exc:
                # Only xmlrpc.client.Fault carries `faultCode`; use getattr so
                # transport/socket errors are re-raised here instead of being
                # masked by an AttributeError on `exc.faultCode`.
                if getattr(exc, "faultCode", None) != AUTHENTICATION_ERROR:
                    log.error("Unable to call RPC function: %s", str(exc))
                    raise exc
                # Authentication error while using the cached token: it may
                # have expired. Retry once with a freshly acquired token.
                log.warning("Fall back to the second try due to %s", str(exc))
                try:
                    return getattr(self.conn, method)(*((self.get_token(refresh=True),) + args))
                except Exception as exc:
                    log.error("Unable to call RPC function: %s", str(exc))
                    raise exc

        raise UyuniUsersException("XML-RPC backend authentication error.")


class UyuniRemoteObject:
    """
    Base class for Uyuni XML-RPC endpoint wrappers; owns the RPC client
    and provides response-conversion helpers.
    """

    def __init__(self, user: str = None, password: str = None):
        self.client: RPCClient = RPCClient(user=user, password=password)

    @staticmethod
    def _convert_datetime_str(response: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """
        Modify any key-value pair where value is a datetime object to a string.

        :param response: response dictionary to be processed

        :return: new dictionary with datetime objects converted to string,
                 or None when the input is empty/None
        """
        if response:
            return {
                key: "{0}".format(value) if isinstance(value, datetime.datetime) else value
                for key, value in response.items()
            }
        return None

    @staticmethod
    def _convert_datetime_list(response: List[Dict[str, Any]]) -> Optional[List[Dict[str, Any]]]:
        """
        Modify any list of key-value pairs where value is a datetime object to a string.

        :param response: list of dictionaries to be processed

        :return: list of new dictionaries with datetime objects converted to
                 string, or None when the input is empty/None
        """
        if response:
            return [UyuniRemoteObject._convert_datetime_str(value) for value in response]
        return None

    @staticmethod
    def _convert_bool_response(response: int) -> bool:
        """
        Translate the XML-RPC integer status (1 == success) into a boolean.
        """
        return response == 1

class UyuniUser(UyuniRemoteObject):
    """
    CRUD operations on users.
    """

    def get_details(self, login: str) -> Dict[str, Any]:
        """
        Retrieve details of an Uyuni user.

        :param login: user name to lookup

        :return: Dictionary with user details
        """
        return self.client("user.getDetails", login)

    def list_users(self) -> List[Dict[str, Any]]:
        """
        Return all Uyuni users visible to the authenticated user.

        :return: all users visible to the authenticated user
        """
        return self.client("user.listUsers")

    def create(self, login: str, password: str, email: str, first_name: str = "", last_name: str = "",
               use_pam_auth: bool = False) -> bool:
        """
        Create an Uyuni user.
        User will be created in the same organization as the authenticated user.

        :param login: desired login name
        :param password: desired password for the user
        :param email: valid email address
        :param first_name: First name
        :param last_name: Last name
        :param use_pam_auth: if you wish to use PAM authentication for this user

        :return: boolean, True indicates success
        """
        # The API expects PAM usage as an integer flag.
        return self._convert_bool_response(self.client("user.create", login, password,
                                                       first_name, last_name, email, int(use_pam_auth)))

    def set_details(self, login: str, password: str, email: str, first_name: str = "", last_name: str = "") -> bool:
        """
        Update an Uyuni user information.

        :param login: login name
        :param password: desired password for the user
        :param email: valid email address
        :param first_name: First name
        :param last_name: Last name

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("user.setDetails", login, {
            "password": password,
            "first_name": first_name,
            "last_name": last_name,
            "email": email
        }))

    def delete(self, login: str) -> bool:
        """
        Remove an Uyuni user.

        :param login: login of the user

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("user.delete", login))

    def list_roles(self, login: str) -> List[str]:
        """
        Return the list of roles of a user.

        :param login: user name to use on lookup

        :return: list of user roles
        """
        return self.client("user.listRoles", login)

    def add_role(self, login: str, role: str) -> bool:
        """
        Add a role to a user.

        :param login: login of the user
        :param role: a new role

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("user.addRole", login, role))

    def remove_role(self, login: str, role: str) -> bool:
        """
        Remove a role from a user.

        :param login: login of the user
        :param role: one of uyuni user roles

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("user.removeRole", login, role))

    def list_assigned_system_groups(self, login: str) -> List[Dict[str, Union[int, str]]]:
        """
        Returns the system groups that a user can administer.

        :param login: login of the user

        :return: List of system groups that a user can administer
        """
        return self.client("user.listAssignedSystemGroups", login)

    def add_assigned_system_groups(self, login: str, server_group_names: List[str], set_default: bool = False) -> bool:
        """
        Add system groups to a user's list of assigned system groups.

        :param login: user id to look for
        :param server_group_names: system groups to add
        :param set_default: True if the system groups should also be added to user's default list.

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("user.addAssignedSystemGroups",
                                                       login, server_group_names, set_default))

    def remove_assigned_system_groups(self, login: str, server_group_names: List[str], set_default: bool = False) -> bool:
        """
        Remove system groups from a user's list of assigned system groups.

        :param login: user id to look for
        :param server_group_names: systems groups to remove from list of assigned system groups
        :param set_default: True if the system groups should also be removed from user's default list.

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("user.removeAssignedSystemGroups",
                                                       login, server_group_names, set_default))


class UyuniChannel(UyuniRemoteObject):
    """
    Read-only queries over software channels.
    """

    def list_manageable_channels(self) -> List[Dict[str, Union[int, str]]]:
        """
        List all software channels that the user is entitled to manage.

        :return: list of manageable channels
        """
        return self.client("channel.listManageableChannels")

    def list_my_channels(self) -> List[Dict[str, Union[int, str]]]:
        """
        List the software channels of the authenticated user
        (wraps the ``channel.listMyChannels`` API call).

        NOTE(review): the original docstring duplicated the one from
        ``list_manageable_channels`` -- confirm the exact semantics against
        the Uyuni API documentation.

        :return: list of channels
        """
        return self.client("channel.listMyChannels")


class UyuniChannelSoftware(UyuniRemoteObject):
    """
    Per-user access management for software channels.
    """

    def set_user_manageable(self, channel_label: str, login: str, access: bool) -> bool:
        """
        Set the manageable flag for a given channel and user.
        If access is set to 'true', this method will give the user manage permissions to the channel.
        Otherwise, that privilege is revoked.

        :param channel_label: label of the channel
        :param login: user login id
        :param access: True if the user should have management access to channel

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("channel.software.setUserManageable",
                                                       channel_label, login, access))

    def set_user_subscribable(self, channel_label: str, login: str, access: bool) -> bool:
        """
        Set the subscribable flag for a given channel and user.
        If value is set to 'true', this method will give the user subscribe permissions to the channel.
        Otherwise, that privilege is revoked.

        :param channel_label: label of the channel
        :param login: user login id
        :param access: True if the user should have subscribe permission to the channel

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("channel.software.setUserSubscribable",
                                                       channel_label, login, access))

    def is_user_manageable(self, channel_label: str, login: str) -> bool:
        """
        Returns whether the channel may be managed by the given user.

        :param channel_label: label of the channel
        :param login: user login id

        :return: boolean which indicates if user can manage channel or not
        """
        return self._convert_bool_response(self.client("channel.software.isUserManageable", channel_label, login))

    def is_user_subscribable(self, channel_label: str, login: str) -> bool:
        """
        Returns whether the channel may be subscribed to by the given user.

        :param channel_label: label of the channel
        :param login: user login id

        :return: boolean which indicates if user can subscribe to the channel or not
        """
        return self._convert_bool_response(self.client("channel.software.isUserSubscribable", channel_label, login))

    def is_globally_subscribable(self, channel_label: str) -> bool:
        """
        Returns whether the channel is globally subscribable on the organization.

        :param channel_label: label of the channel

        :return: boolean which indicates if channel is globally subscribable
        """
        return self._convert_bool_response(self.client("channel.software.isGloballySubscribable", channel_label))


class UyuniOrg(UyuniRemoteObject):
    """
    CRUD operations on organizations.
    """

    def list_orgs(self) -> List[Dict[str, Union[int, str, bool]]]:
        """
        List all organizations.

        :return: list of all existing organizations
        """
        return self.client("org.listOrgs")

    def get_details(self, name: str) -> Dict[str, Union[int, str, bool]]:
        """
        Get org data by name.

        :param name: organisation name

        :return: organization details
        """
        return self.client("org.getDetails", name)

    def create(self, name: str, org_admin_user: str, org_admin_password: str,
               first_name: str, last_name: str, email: str,
               admin_prefix: str = "Mr.", pam: bool = False) -> Dict[str, Union[str, int, bool]]:
        """
        Create a new Uyuni org.

        :param name: organization name
        :param org_admin_user: organization admin user
        :param org_admin_password: organization admin password
        :param first_name: organization admin first name
        :param last_name: organization admin last name
        :param email: organization admin email
        :param admin_prefix: organization admin prefix
        :param pam: organization admin pam authentication

        :return: dictionary with org information
        """
        return self.client("org.create", name, org_admin_user, org_admin_password, admin_prefix,
                           first_name, last_name, email, pam)

    def delete(self, name: str) -> bool:
        """
        Delete an Uyuni org.

        :param name: organization name

        :return: boolean, True indicates success
        """
        # Resolve the name to the internal id; -1 if no id is present.
        org_id = int(self.get_details(name=name).get("id", -1))
        return self._convert_bool_response(self.client("org.delete", org_id))

    def update_name(self, org_id: int, name: str) -> Dict[str, Union[str, int, bool]]:
        """
        Update an Uyuni org name.

        :param org_id: organization internal id
        :param name: new organization name

        :return: organization details
        """
        return self.client("org.updateName", org_id, name)


class UyuniOrgTrust(UyuniRemoteObject):
    """
    Management of trust relations between organizations.
    """

    def __init__(self, user: str = None, password: str = None):
        UyuniRemoteObject.__init__(self, user, password)
        # Used to resolve organization names to internal ids.
        self._org_manager = UyuniOrg(user, password)

    def list_orgs(self) -> List[Dict[str, Union[str, int]]]:
        """
        List all organizations trusted by the authenticated user organization.

        :return: List of organization details
        """
        return self.client("org.trusts.listOrgs")

    def list_trusts(self, org_name: str) -> List[Dict[str, Union[str, int, bool]]]:
        """
        List all trusts for the organization.

        :param org_name: organization name

        :return: list with all organizations and their trust status
        """
        org = self._org_manager.get_details(org_name)
        return self.client("org.trusts.listTrusts", org["id"])

    def add_trust_by_name(self, org_name: str, org_trust: str) -> bool:
        """
        Set an organisation as trusted by another.

        :param org_name: organization name
        :param org_trust: name of organization to trust

        :return: boolean, True indicates success
        """
        this_org = self._org_manager.get_details(org_name)
        trust_org = self._org_manager.get_details(org_trust)
        return self.add_trust(this_org["id"], trust_org["id"])

    def add_trust(self, org_id: int, org_trust_id: int) -> bool:
        """
        Set an organisation as trusted by another.

        :param org_id: organization id
        :param org_trust_id: organization id to trust

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("org.trusts.addTrust", org_id, org_trust_id))

    def remove_trust_by_name(self, org_name: str, org_untrust: str) -> bool:
        """
        Set an organisation as not trusted by another.

        :param org_name: organization name
        :param org_untrust: organization name to untrust

        :return: boolean, True indicates success
        """
        this_org = self._org_manager.get_details(org_name)
        trust_org = self._org_manager.get_details(org_untrust)
        return self.remove_trust(this_org["id"], trust_org["id"])

    def remove_trust(self, org_id: int, org_untrust_id: int) -> bool:
        """
        Set an organisation as not trusted by another.

        :param org_id: organization id
        :param org_untrust_id: organization id to untrust

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("org.trusts.removeTrust", org_id, org_untrust_id))


class UyuniSystemgroup(UyuniRemoteObject):
    """
    Provides methods to access and modify system groups.
    """

    def list_all_groups(self) -> List[Dict[str, Union[int, str]]]:
        """
        Retrieve a list of system groups that are accessible by the user.

        :return: list with group information
        """
        return self.client("systemgroup.listAllGroups")

    def get_details(self, name: str) -> Dict[str, Union[int, str]]:
        """
        Retrieve details of a system group.

        :param name: Name of the system group.

        :return: data of the system group.
        """
        return self.client("systemgroup.getDetails", name)

    def create(self, name: str, description: str) -> Dict[str, Union[int, str]]:
        """
        Create a new system group.

        :param name: Name of the system group.
        :param description: Description of the system group.

        :return: data of the system group.
        """
        return self.client("systemgroup.create", name, description)

    def delete(self, name: str) -> bool:
        """
        Delete a system group.

        :param name: Name of the system group.

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("systemgroup.delete", name))

    def update(self, name: str, description: str) -> Dict[str, Union[int, str]]:
        """
        Update an existing system group.

        :param name: Name of the system group.
        :param description: Description of the system group.

        :return: data of the system group.
        """
        return self.client("systemgroup.update", name, description)

    def list_systems(self, name: str, minimal: bool = True) -> List[Dict[str, Any]]:
        """
        Get information about systems in a group.

        :param name: Group name
        :param minimal: default True. Only return minimal information about systems, use False to get more details

        :return: List of system information
        """
        return self._convert_datetime_list(
            self.client("systemgroup.listSystemsMinimal" if minimal else "systemgroup.listSystems", name))

    def add_remove_systems(self, name: str, add_remove: bool, system_ids: Optional[List[int]] = None) -> bool:
        """
        Add or remove systems from a system group.

        :param name: Group name
        :param add_remove: True to add to the group, False to remove
        :param system_ids: List of system ids to add or remove (default: none)

        :return: boolean, True indicates success
        """
        # `None` default instead of a shared mutable `[]` default argument.
        return self._convert_bool_response(self.client("systemgroup.addOrRemoveSystems",
                                                       name, system_ids or [], add_remove))


class UyuniSystems(UyuniRemoteObject):
    """
    Queries over registered systems.
    """

    def get_minion_id_map(self, refresh: bool = False) -> Dict[str, int]:
        """
        Returns a map from minion ID to Uyuni system ID for all systems a user has access to.
        This method caches results in ``__context__``, in order to avoid multiple XMLRPC calls.

        :param refresh: Get new data from server, ignoring values in local context cache
        :return: Map between minion ID and system ID of all systems accessible by authenticated user
        """
        minions_token_key = "uyuni.minions_id_map_" + self.client.get_user()
        if minions_token_key not in __context__ or refresh:
            __context__[minions_token_key] = self.client("system.getMinionIdMap")
        return __context__[minions_token_key]


class UyuniActivationKey(UyuniRemoteObject):
    """
    CRUD operations on Activation Keys.
    """

    def get_details(self, id: str) -> Dict[str, Any]:
        """
        Get details of an Uyuni Activation Key.

        :param id: the Activation Key ID (shadows the builtin ``id``; the
                   name is kept for backwards compatibility with callers)

        :return: Activation Key information
        """
        return self.client("activationkey.getDetails", id)

    def delete(self, id: str) -> bool:
        """
        Deletes an Uyuni Activation Key.

        :param id: the Activation Key ID

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("activationkey.delete", id))

    def create(self, key: str, description: str,
               base_channel_label: str = '',
               usage_limit: int = 0,
               system_types: Optional[List[str]] = None,
               universal_default: bool = False) -> bool:
        """
        Creates an Uyuni Activation Key.

        :param key: activation key name
        :param description: activation key description
        :param base_channel_label: base channel to be used
        :param usage_limit: activation key usage limit. Default value is 0, which means unlimited usage
        :param system_types: system types to be assigned.
                             Can be one of: 'virtualization_host', 'container_build_host',
                             'monitoring_entitled', 'osimage_build_host', 'virtualization_host'
        :param universal_default: sets this activation key as organization universal default

        :return: boolean, True indicates success
        """
        # `None` default instead of a shared mutable `[]` default argument.
        return self._convert_bool_response(self.client("activationkey.create", key, description, base_channel_label,
                                                       usage_limit, system_types or [], universal_default))

    def set_details(self, key: str,
                    description: str = None,
                    contact_method: str = None,
                    base_channel_label: str = None,
                    usage_limit: int = None,
                    universal_default: bool = False) -> bool:
        """
        Updates an Uyuni Activation Key.

        :param key: activation key name
        :param description: activation key description
        :param contact_method: contact method to be used. Can be one of: 'default', 'ssh-push' or 'ssh-push-tunnel'
        :param base_channel_label: base channel to be used
        :param usage_limit: activation key usage limit. None or 0 means unlimited usage
        :param universal_default: sets this activation key as organization universal default

        :return: boolean, True indicates success
        """
        data: Dict[str, Any] = {'universal_default': universal_default}
        if description:
            data['description'] = description
        # Empty string is a valid base channel label, so only skip None.
        if base_channel_label is not None:
            data['base_channel_label'] = base_channel_label
        if contact_method:
            data['contact_method'] = contact_method

        if usage_limit:
            data['usage_limit'] = usage_limit
        else:
            # No (or zero) limit means unlimited usage on the server side.
            data['unlimited_usage_limit'] = True
        return self._convert_bool_response(self.client("activationkey.setDetails", key, data))

    def add_entitlements(self, key: str, system_types: List[str]) -> bool:
        """
        Add a list of entitlements to an activation key.

        :param key: activation key name
        :param system_types: list of system types to be added

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("activationkey.addEntitlements", key, system_types))

    def remove_entitlements(self, key: str, system_types: List[str]) -> bool:
        """
        Remove a list of entitlements from an activation key.

        :param key: activation key name
        :param system_types: list of system types to be removed

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("activationkey.removeEntitlements", key, system_types))

    def add_child_channels(self, key: str, child_channels: List[str]) -> bool:
        """
        Add child channels to an activation key.

        :param key: activation key name
        :param child_channels: List of child channels to be added

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("activationkey.addChildChannels", key, child_channels))

    def remove_child_channels(self, key: str, child_channels: List[str]) -> bool:
        """
        Remove child channels from an activation key.

        :param key: activation key name
        :param child_channels: List of child channels to be removed

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("activationkey.removeChildChannels", key, child_channels))

    def check_config_deployment(self, key: str) -> bool:
        """
        Return the status of the 'configure_after_registration' flag for an Activation Key.

        :param key: activation key name

        :return: boolean, true if enabled, false if disabled
        """
        return self._convert_bool_response(self.client("activationkey.checkConfigDeployment", key))

    def enable_config_deployment(self, key: str) -> bool:
        """
        Enables the 'configure_after_registration' flag for an Activation Key.

        :param key: activation key name

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("activationkey.enableConfigDeployment", key))

    def disable_config_deployment(self, key: str) -> bool:
        """
        Disables the 'configure_after_registration' flag for an Activation Key.

        :param key: activation key name

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("activationkey.disableConfigDeployment", key))

    def add_packages(self, key: str, packages: List[Any]) -> bool:
        """
        Add a list of packages to an activation key.

        :param key: activation key name
        :param packages: list of packages to be added

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("activationkey.addPackages", key, packages))

    def remove_packages(self, key: str, packages: List[Any]) -> bool:
        """
        Remove a list of packages from an activation key.

        :param key: activation key name
        :param packages: list of packages to be removed

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("activationkey.removePackages", key, packages))

    def add_server_groups(self, key: str, server_groups: List[int]) -> bool:
        """
        Add a list of server groups to an activation key.

        :param key: activation key name
        :param server_groups: list of server groups to be added

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("activationkey.addServerGroups", key, server_groups))

    def remove_server_groups(self, key: str, server_groups: List[int]) -> bool:
        """
        Remove a list of server groups from an activation key.

        :param key: activation key name
        :param server_groups: list of server groups to be removed

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("activationkey.removeServerGroups", key, server_groups))

    def list_config_channels(self, key: str) -> List[Dict[str, Any]]:
        """
        List configuration channels associated to an activation key.

        :param key: activation key name

        :return: List of configuration channels
        """
        return self.client("activationkey.listConfigChannels", key)

    def set_config_channels(self, keys: List[str], config_channel_label: List[str]) -> bool:
        """
        Replace the existing set of configuration channels on the given activation keys.
        Channels are ranked by their order in the array.

        :param keys: list of activation key names
        :param config_channel_label: list of configuration channel labels

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("activationkey.setConfigChannels", keys, config_channel_label))


class UyuniChildMasterIntegration:
    """
    Integration with the Salt Master which is running
    on the same host as this current Minion.
    """
    DEFAULT_MASTER_CONFIG_PATH = "/etc/salt/master"

    def __init__(self):
        self._minions = CkMinions(salt.config.client_config(self._get_master_config()))

    @staticmethod
    def _get_master_config() -> str:
        """
        Locate the salt master configuration file: first existing path among
        the pillar-configured candidates, falling back to the default.

        :return: path to salt master configuration file
        """
        default = UyuniChildMasterIntegration.DEFAULT_MASTER_CONFIG_PATH
        candidates = __pillar__.get("uyuni", {}).get("masters", {}).get("configs", [default])
        return next((path for path in candidates if os.path.exists(path)), default)

    def select_minions(self, target: str, target_type: str = "glob") -> Dict[str, Union[List[str], bool]]:
        """
        Select minion IDs that match the target expression.

        :param target: target expression to be applied
        :param target_type: target type, one of the following: glob, grain, grain_pcre, pillar, pillar_pcre,
                    pillar_exact, compound, compound_pillar_exact. Default: glob.

        :return: list of minions
        """
        return self._minions.check_minions(expr=target, tgt_type=target_type)


def __virtual__():
    """
    Make this module available under its virtual name.

    :return: the module's virtual name
    """
    return __virtualname__


# Users

def user_get_details(login, password=None, org_admin_user=None, org_admin_password=None):
    """
    Get details of an Uyuni user.
    If a password is supplied, the user's own credentials are used to authenticate;
    otherwise the organization administrator credentials are used. If neither is
    provided, credentials from the pillar are used.

    :param login: user id to look for
    :param password: password for the user
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: The user information
    """
    if password is None:
        auth_user, auth_password = org_admin_user, org_admin_password
    else:
        auth_user, auth_password = login, password
    return UyuniUser(auth_user, auth_password).get_details(login)


def user_list_users(org_admin_user=None, org_admin_password=None):
    """
    List all Uyuni users visible to the authenticated user.

    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: all users visible to the authenticated user
    """
    client = UyuniUser(org_admin_user, org_admin_password)
    return client.list_users()


def user_create(login, password, email, first_name, last_name, use_pam_auth=False,
                org_admin_user=None, org_admin_password=None):
    """
    Create a new Uyuni user.

    :param login: user id to look for
    :param password: password for the user
    :param email: user email address
    :param first_name: user first name
    :param last_name: user last name
    :param use_pam_auth: if you wish to use PAM authentication for this user
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    client = UyuniUser(org_admin_user, org_admin_password)
    return client.create(login=login, password=password, email=email,
                         first_name=first_name, last_name=last_name,
                         use_pam_auth=use_pam_auth)


def user_set_details(login, password, email, first_name=None, last_name=None,
                     org_admin_user=None, org_admin_password=None):
    """
    Update the details of an existing Uyuni user.

    :param login: user id to look for
    :param password: password for the user
    :param email: user email address
    :param first_name: user first name
    :param last_name: user last name
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    client = UyuniUser(org_admin_user, org_admin_password)
    return client.set_details(login=login, password=password, email=email,
                              first_name=first_name, last_name=last_name)


def user_delete(login, org_admin_user=None, org_admin_password=None):
    """
    Delete an Uyuni user.

    :param login: user id to look for
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    client = UyuniUser(org_admin_user, org_admin_password)
    return client.delete(login)


def user_list_roles(login, password=None, org_admin_user=None, org_admin_password=None):
    """
    Return the roles assigned to an Uyuni user.
    If a password is supplied, the user's own credentials are used to authenticate;
    otherwise the organization administrator credentials are used. If neither is
    provided, credentials from the pillar are used.

    :param login: user id to look for
    :param password: password for the user
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: List of user roles assigned
    """
    if password is None:
        auth_user, auth_password = org_admin_user, org_admin_password
    else:
        auth_user, auth_password = login, password
    return UyuniUser(auth_user, auth_password).list_roles(login)


def user_add_role(login, role, org_admin_user=None, org_admin_password=None):
    """
    Assign a role to an Uyuni user.

    :param login: user id to look for
    :param role: role to be added to the user
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    client = UyuniUser(org_admin_user, org_admin_password)
    return client.add_role(login=login, role=role)


def user_remove_role(login, role, org_admin_user=None, org_admin_password=None):
    """
    Revoke a role from an Uyuni user.

    :param login: user id to look for
    :param role: role to be removed from the user
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    client = UyuniUser(org_admin_user, org_admin_password)
    return client.remove_role(login=login, role=role)


def user_list_assigned_system_groups(login, org_admin_user=None, org_admin_password=None):
    """
    Return the system groups that a user can administer.

    :param login: user id to look for
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: List of system groups that a user can administer
    """
    client = UyuniUser(org_admin_user, org_admin_password)
    return client.list_assigned_system_groups(login=login)


def user_add_assigned_system_groups(login, server_group_names, set_default=False,
                                    org_admin_user=None, org_admin_password=None):
    """
    Add system groups to a user's list of assigned system groups.

    :param login: user id to look for
    :param server_group_names: systems groups to add to list of assigned system groups
    :param set_default: Should system groups also be added to user's list of default system groups.
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    client = UyuniUser(org_admin_user, org_admin_password)
    return client.add_assigned_system_groups(login=login,
                                             server_group_names=server_group_names,
                                             set_default=set_default)


def user_remove_assigned_system_groups(login, server_group_names, set_default=False,
                                       org_admin_user=None, org_admin_password=None):
    """
    Remove system groups from a user's list of assigned system groups.

    :param login: user id to look for
    :param server_group_names: systems groups to remove from list of assigned system groups
    :param set_default: Should system groups also be removed from user's list of default system groups.
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    client = UyuniUser(org_admin_user, org_admin_password)
    return client.remove_assigned_system_groups(login=login,
                                                server_group_names=server_group_names,
                                                set_default=set_default)


# Software channels

def channel_list_manageable_channels(login, password):
    """
    List all channels manageable by the authenticated user.

    :param login: user login id
    :param password: user password

    :return: list of manageable channels for the user
    """
    client = UyuniChannel(login, password)
    return client.list_manageable_channels()


def channel_list_my_channels(login, password):
    """
    List all channels the authenticated user is subscribed to.

    :param login: user login id
    :param password: user password

    :return: list of subscribed channels for the user
    """
    client = UyuniChannel(login, password)
    return client.list_my_channels()


def channel_software_set_user_manageable(channel_label, login, access,
                                         org_admin_user=None, org_admin_password=None):
    """
    Set the manageable flag for a given channel and user.
    When access is True the user is granted manage permissions on the channel;
    when False that privilege is revoked.

    :param channel_label: label of the channel
    :param login: user login id
    :param access: True if the user should have management access to channel
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    client = UyuniChannelSoftware(org_admin_user, org_admin_password)
    return client.set_user_manageable(channel_label, login, access)


def channel_software_set_user_subscribable(channel_label, login, access,
                                           org_admin_user=None, org_admin_password=None):
    """
    Set the subscribable flag for a given channel and user.
    When access is True the user is granted subscribe permissions on the channel;
    when False that privilege is revoked.

    :param channel_label: label of the channel
    :param login: user login id
    :param access: True if the user should have subscribe access to channel
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    client = UyuniChannelSoftware(org_admin_user, org_admin_password)
    return client.set_user_subscribable(channel_label, login, access)


def channel_software_is_user_manageable(channel_label, login, org_admin_user=None, org_admin_password=None):
    """
    Check whether a channel may be managed by the given user.

    :param channel_label: label of the channel
    :param login: user login id
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean which indicates if user can manage channel or not
    """
    client = UyuniChannelSoftware(org_admin_user, org_admin_password)
    return client.is_user_manageable(channel_label, login)


def channel_software_is_user_subscribable(channel_label, login, org_admin_user=None, org_admin_password=None):
    """
    Check whether a channel may be subscribed to by the given user.

    :param channel_label: label of the channel
    :param login: user login id
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean which indicates if user can subscribe to the channel or not
    """
    client = UyuniChannelSoftware(org_admin_user, org_admin_password)
    return client.is_user_subscribable(channel_label, login)


def channel_software_is_globally_subscribable(channel_label, org_admin_user=None, org_admin_password=None):
    """
    Check whether a channel is globally subscribable within the organization.

    :param channel_label: label of the channel
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean which indicates if channel is globally subscribable
    """
    client = UyuniChannelSoftware(org_admin_user, org_admin_password)
    return client.is_globally_subscribable(channel_label)


def org_list_orgs(admin_user=None, admin_password=None):
    """
    List all organizations.
    Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

    :param admin_user: uyuni admin user
    :param admin_password: uyuni admin password

    :return: list of all available organizations.
    """
    client = UyuniOrg(admin_user, admin_password)
    return client.list_orgs()


def org_get_details(name, admin_user=None, admin_password=None):
    """
    Get the details of an organization.
    Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

    :param name: organisation name
    :param admin_user: uyuni admin user
    :param admin_password: uyuni admin password

    :return: organization details
    """
    client = UyuniOrg(admin_user, admin_password)
    return client.get_details(name)


def org_delete(name, admin_user=None, admin_password=None):
    """
    Delete an organization.
    Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

    :param name: organization name
    :param admin_user: uyuni admin user
    :param admin_password: uyuni admin password

    :return: boolean, True indicates success
    """
    client = UyuniOrg(admin_user, admin_password)
    return client.delete(name)


def org_create(name, org_admin_user, org_admin_password, first_name, last_name, email,
               admin_prefix="Mr.", pam=False, admin_user=None, admin_password=None):
    """
    Create an Uyuni organization.
    Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

    :param name: organization name
    :param org_admin_user: organization admin user
    :param org_admin_password: organization admin password
    :param first_name: organization admin first name
    :param last_name: organization admin last name
    :param email: organization admin email
    :param admin_prefix: organization admin prefix
    :param pam: organization admin pam authentication
    :param admin_user: uyuni admin user
    :param admin_password: uyuni admin password

    :return: dictionary with org information
    """
    client = UyuniOrg(admin_user, admin_password)
    return client.create(name=name, org_admin_user=org_admin_user,
                         org_admin_password=org_admin_password,
                         first_name=first_name, last_name=last_name, email=email,
                         admin_prefix=admin_prefix, pam=pam)


def org_update_name(org_id, name, admin_user=None, admin_password=None):
    """
    Update the name of an Uyuni organization.
    Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

    :param org_id: organization internal id
    :param name: new organization name
    :param admin_user: uyuni admin user
    :param admin_password: uyuni admin password

    :return: organization details
    """
    client = UyuniOrg(admin_user, admin_password)
    return client.update_name(org_id, name)


def org_trust_list_orgs(org_admin_user=None, org_admin_password=None):
    """
    List all organizations trusted by the authenticated user's organization.

    :param org_admin_user: organization admin user
    :param org_admin_password: organization admin password

    :return: List of organization details
    """
    client = UyuniOrgTrust(org_admin_user, org_admin_password)
    return client.list_orgs()


def org_trust_list_trusts(org_name, admin_user=None, admin_password=None):
    """
    List all trusts for one organization.
    Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

    :param org_name: Name of the organization to get the trusts
    :param admin_user: authentication user
    :param admin_password: authentication user password

    :return: list with all organizations and their trust status
    """
    client = UyuniOrgTrust(admin_user, admin_password)
    return client.list_trusts(org_name)


def org_trust_add_trust_by_name(org_name, org_trust, admin_user=None, admin_password=None):
    """
    Add an organization, identified by name, to the list of trusted organizations.
    Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

    :param org_name: organization name
    :param org_trust: Trust organization name
    :param admin_user: uyuni admin user
    :param admin_password: uyuni admin password

    :return: boolean, True indicates success
    """
    client = UyuniOrgTrust(admin_user, admin_password)
    return client.add_trust_by_name(org_name, org_trust)


def org_trust_add_trust(org_id, org_trust_id, admin_user=None, admin_password=None):
    """
    Add an organization, identified by id, to the list of trusted organizations.
    Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

    :param org_id: Organization id
    :param org_trust_id: Trust organization id
    :param admin_user: uyuni admin user
    :param admin_password: uyuni admin password

    :return: boolean, True indicates success
    """
    client = UyuniOrgTrust(admin_user, admin_password)
    return client.add_trust(org_id, org_trust_id)


def org_trust_remove_trust_by_name(org_name, org_untrust, admin_user=None, admin_password=None):
    """
    Remove an organization, identified by name, from the list of trusted organizations.
    Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

    :param org_name: organization name
    :param org_untrust: organization name to untrust
    :param admin_user: uyuni admin user
    :param admin_password: uyuni admin password

    :return: boolean, True indicates success
    """
    client = UyuniOrgTrust(admin_user, admin_password)
    return client.remove_trust_by_name(org_name, org_untrust)


def org_trust_remove_trust(org_id, org_untrust_id, admin_user=None, admin_password=None):
    """
    Remove an organization, identified by id, from the list of trusted organizations.
    Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

    :param org_id: organization id
    :param org_untrust_id: organization id to untrust
    :param admin_user: uyuni admin user
    :param admin_password: uyuni admin password

    :return: boolean, True indicates success
    """
    client = UyuniOrgTrust(admin_user, admin_password)
    return client.remove_trust(org_id, org_untrust_id)


# System Groups

def systemgroup_create(name, descr, org_admin_user=None, org_admin_password=None):
    """
    Create a new system group.

    :param name: Name of the system group.
    :param descr: Description of the system group.
    :param org_admin_user: organization administrator username
    :param org_admin_password: organization administrator password

    :return: details of the system group
    """
    client = UyuniSystemgroup(org_admin_user, org_admin_password)
    return client.create(name=name, description=descr)


def systemgroup_list_all_groups(username, password):
    """
    Retrieve the list of system groups accessible by the given user.

    :param username: username to authenticate with
    :param password: password to authenticate with

    :return: list of system groups accessible by the user
    """
    client = UyuniSystemgroup(username, password)
    return client.list_all_groups()


def systemgroup_get_details(name, org_admin_user=None, org_admin_password=None):
    """
    Return the details of a system group.

    :param name: Name of the system group.
    :param org_admin_user: organization administrator username
    :param org_admin_password: organization administrator password

    :return: details of the system group
    """
    client = UyuniSystemgroup(org_admin_user, org_admin_password)
    return client.get_details(name=name)


def systemgroup_update(name, descr, org_admin_user=None, org_admin_password=None):
    """
    Update an existing system group.

    :param name: Name of the system group.
    :param descr: Description of the system group.
    :param org_admin_user: organization administrator username
    :param org_admin_password: organization administrator password

    :return: details of the system group
    """
    client = UyuniSystemgroup(org_admin_user, org_admin_password)
    return client.update(name=name, description=descr)


def systemgroup_delete(name, org_admin_user=None, org_admin_password=None):
    """
    Delete a system group.

    :param name: Name of the system group.
    :param org_admin_user: organization administrator username
    :param org_admin_password: organization administrator password

    :return: boolean, True indicates success
    """
    client = UyuniSystemgroup(org_admin_user, org_admin_password)
    return client.delete(name=name)


def systemgroup_list_systems(name, minimal=True, org_admin_user=None, org_admin_password=None):
    """
    List the systems belonging to a system group.

    :param name: Name of the system group.
    :param minimal: default True. Only return minimal information about systems, use False to get more details
    :param org_admin_user: organization administrator username
    :param org_admin_password: organization administrator password

    :return: List of system information
    """
    client = UyuniSystemgroup(org_admin_user, org_admin_password)
    return client.list_systems(name=name, minimal=minimal)


def systemgroup_add_remove_systems(name, add_remove, system_ids=None,
                                   org_admin_user=None, org_admin_password=None):
    """
    Add systems to or remove systems from a system group.

    :param name: Name of the system group.
    :param add_remove: True to add to the group, False to remove.
    :param system_ids: list of system ids to add/remove from group (defaults to an empty list)
    :param org_admin_user: organization administrator username
    :param org_admin_password: organization administrator password

    :return: boolean, True indicates success
    """
    # Use None as the default instead of a mutable [] literal: a shared list
    # default could be mutated by the callee and leak state across calls.
    if system_ids is None:
        system_ids = []
    return UyuniSystemgroup(org_admin_user, org_admin_password).add_remove_systems(name=name, add_remove=add_remove,
                                                                                   system_ids=system_ids)


def master_select_minions(target=None, target_type="glob"):
    """
    Return the minions known to the Salt Master configured on this host
    that match the given target expression.

    :param target: target expression to filter minions
    :param target_type: target type, one of the following: glob, grain, grain_pcre, pillar, pillar_pcre,
                pillar_exact, compound, compound_pillar_exact. Default: glob.

    :return: list of minion IDs
    """
    integration = UyuniChildMasterIntegration()
    return integration.select_minions(target=target, target_type=target_type)


def systems_get_minion_id_map(username=None, password=None, refresh=False):
    """
    Return a map from minion ID to Uyuni system ID for all systems the
    authenticated user has access to.

    :param username: username to authenticate
    :param password: password for user
    :param refresh: Get new data from server, ignoring values in local context cache

    :return: Map between minion ID and system ID of all system accessible by authenticated user
    """
    client = UyuniSystems(username, password)
    return client.get_minion_id_map(refresh)


# Activation Keys

def activation_key_get_details(id, org_admin_user=None, org_admin_password=None):
    """
    Get the details of an Uyuni Activation Key.

    :param id: the Activation Key ID
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: Activation Key information
    """
    client = UyuniActivationKey(org_admin_user, org_admin_password)
    return client.get_details(id)


def activation_key_delete(id, org_admin_user=None, org_admin_password=None):
    """
    Delete an Uyuni Activation Key.

    :param id: the Activation Key ID
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    client = UyuniActivationKey(org_admin_user, org_admin_password)
    return client.delete(id)


def activation_key_create(key, description,
                          base_channel_label='',
                          usage_limit=0,
                          system_types=None, universal_default=False,
                          org_admin_user=None, org_admin_password=None):
    """
    Create an Uyuni Activation Key.

    :param key: activation key name
    :param description: activation key description
    :param base_channel_label: base channel to be used
    :param usage_limit: activation key usage limit. Default value is 0, which means unlimited usage
    :param system_types: system types to be assigned (defaults to an empty list).
                         Can be one of: 'virtualization_host', 'container_build_host',
                         'monitoring_entitled', 'osimage_build_host'
    :param universal_default: sets this activation key as organization universal default
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    # Use None as the default instead of a mutable [] literal: a shared list
    # default could be mutated by the callee and leak state across calls.
    if system_types is None:
        system_types = []
    return UyuniActivationKey(org_admin_user, org_admin_password).create(key,
                                                                         description,
                                                                         base_channel_label,
                                                                         usage_limit,
                                                                         system_types,
                                                                         universal_default)


def activation_key_set_details(key,
                               description=None,
                               contact_method=None,
                               base_channel_label=None,
                               usage_limit=None,
                               universal_default=False,
                               org_admin_user=None, org_admin_password=None):
    """
    Update an Uyuni Activation Key.

    :param key: activation key name
    :param description: activation key description
    :param contact_method: contact method to be used. Can be one of: 'default', 'ssh-push' or 'ssh-push-tunnel'
    :param base_channel_label: base channel to be used
    :param usage_limit: activation key usage limit. Default value is 0, which means unlimited usage
    :param universal_default: sets this activation key as organization universal default
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    client = UyuniActivationKey(org_admin_user, org_admin_password)
    return client.set_details(key,
                              description=description,
                              contact_method=contact_method,
                              base_channel_label=base_channel_label,
                              usage_limit=usage_limit,
                              universal_default=universal_default)


def activation_key_add_entitlements(key, system_types, org_admin_user=None, org_admin_password=None):
    """
    Add entitlements to an activation key.

    :param key: activation key name
    :param system_types: list of system types to be added
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    client = UyuniActivationKey(org_admin_user, org_admin_password)
    return client.add_entitlements(key, system_types)


def activation_key_remove_entitlements(key, system_types, org_admin_user=None, org_admin_password=None):
    """
    Remove entitlements from an activation key.

    :param key: activation key name
    :param system_types: list of system types to be removed
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    client = UyuniActivationKey(org_admin_user, org_admin_password)
    return client.remove_entitlements(key, system_types)


def activation_key_add_child_channels(key, child_channels, org_admin_user=None, org_admin_password=None):
    """
    Add child channels to an activation key.

    :param key: activation key name
    :param child_channels: List of child channels to be added
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    client = UyuniActivationKey(org_admin_user, org_admin_password)
    return client.add_child_channels(key, child_channels)


def activation_key_remove_child_channels(key, child_channels, org_admin_user=None, org_admin_password=None):
    """
    Remove child channels from an activation key.

    :param key: activation key name
    :param child_channels: List of child channels to be removed
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    client = UyuniActivationKey(org_admin_user, org_admin_password)
    return client.remove_child_channels(key, child_channels)


def activation_key_check_config_deployment(key, org_admin_user=None, org_admin_password=None):
    """
    Return the status of the 'configure_after_registration' flag for an Activation Key.

    :param key: activation key name
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, true if enabled, false if disabled
    """
    client = UyuniActivationKey(org_admin_user, org_admin_password)
    return client.check_config_deployment(key)


def activation_key_enable_config_deployment(key, org_admin_user=None, org_admin_password=None):
    """
    Enable the 'configure_after_registration' flag for an Activation Key.

    :param key: activation key name
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    client = UyuniActivationKey(org_admin_user, org_admin_password)
    return client.enable_config_deployment(key)


def activation_key_disable_config_deployment(key, org_admin_user=None, org_admin_password=None):
    """
    Disable the 'configure_after_registration' flag for an Activation Key.

    :param key: activation key name
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    client = UyuniActivationKey(org_admin_user, org_admin_password)
    return client.disable_config_deployment(key)


def activation_key_add_packages(key, packages, org_admin_user=None, org_admin_password=None):
    """
    Add packages to an activation key.

    :param key: activation key name
    :param packages: list of packages to be added
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    client = UyuniActivationKey(org_admin_user, org_admin_password)
    return client.add_packages(key, packages)


def activation_key_remove_packages(key, packages, org_admin_user=None, org_admin_password=None):
    """
    Remove packages from an activation key.

    :param key: activation key name
    :param packages: list of packages to be removed
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    client = UyuniActivationKey(org_admin_user, org_admin_password)
    return client.remove_packages(key, packages)


def activation_key_add_server_groups(key, server_groups, org_admin_user=None, org_admin_password=None):
    """
    Add server groups to an activation key.

    :param key: activation key name
    :param server_groups: list of server groups to be added
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    client = UyuniActivationKey(org_admin_user, org_admin_password)
    return client.add_server_groups(key, server_groups)


def activation_key_remove_server_groups(key, server_groups, org_admin_user=None, org_admin_password=None):
    """
    Detach a list of server groups from an activation key.

    :param key: activation key name
    :param server_groups: list of server groups to be removed
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    ak_manager = UyuniActivationKey(org_admin_user, org_admin_password)
    return ak_manager.remove_server_groups(key, server_groups)


def activation_key_list_config_channels(key, org_admin_user=None, org_admin_password=None):
    """
    Return the configuration channels associated with an activation key.

    :param key: activation key name
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: List of configuration channels
    """
    ak_manager = UyuniActivationKey(org_admin_user, org_admin_password)
    return ak_manager.list_config_channels(key)


def activation_key_set_config_channels(keys, config_channel_label,
                                       org_admin_user=None, org_admin_password=None):
    """
    Replace the configuration channels assigned to the given activation keys.
    The channel rank is defined by the position in the label list.

    :param keys: list of activation key names
    :param config_channel_label: list of configuration channels labels
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    ak_manager = UyuniActivationKey(org_admin_user, org_admin_password)
    return ak_manager.set_config_channels(keys, config_channel_label)
  070701000000C4000081B400000000000000000000000163F87E3000002133000000000000000000000000000000000000002A00000000susemanager-sls/src/modules/virt_utils.py """
virt utility functions
"""

import logging
from pathlib import Path
import os.path
import re
import subprocess
from xml.etree import ElementTree
try:
    import libvirt
except ImportError:
    pass

try:
    import virt_tuner
except ImportError:
    virt_tuner = None

from salt.exceptions import CommandExecutionError

log = logging.getLogger(__name__)

__virtualname__ = "virt_utils"


def __virtual__():
    """
    Load only when the virt execution module is available.
    """
    if "virt.vm_info" not in __salt__:
        return (False, "Module virt_utils: virt module can't be loaded")
    return __virtualname__


def get_cluster_filesystem(path):
    """
    Get the cluster filesystem resource containing a path.

    :param path: the path to check
    :return: the matching Filesystem resource name or `None`
    """
    target = str(Path(path).resolve())
    try:
        # `crm configure show xml` dumps the whole cluster configuration as XML
        crm_xml = subprocess.Popen(
            ["crm", "configure", "show", "xml"], stdout=subprocess.PIPE
        ).communicate()[0]
        crm_conf = ElementTree.fromstring(crm_xml)

        for resource in crm_conf.findall(".//resources/*"):
            # Only resources wrapping a Filesystem primitive are of interest
            if resource.find(".//primitive[@type='Filesystem']/") is None:
                continue
            directory = resource.find(".//primitive//nvpair[@name='directory']")
            if directory is None:
                continue
            mount_point = directory.get("value")
            # The resource matches when its mount point is an ancestor of path
            if mount_point and os.path.commonpath([target, mount_point]) == mount_point:
                return resource.get("id")
    except OSError as err:
        log.debug("Failed to get cluster resource name for path, %s: %s", path, err)

    return None


def vm_info(name=None):
    """
    Provide additional virtual machine infos.

    Merges the data returned by virt.vm_info with what can be discovered from
    a pacemaker cluster (crm_mon / crm configure), for VMs managed as
    VirtualDomain resources.

    :param name: optional VM name; when given, stop parsing cluster
        definition files as soon as that VM has been handled
    :return: dict mapping VM names to their extra properties
    """
    try:
        infos = __salt__["virt.vm_info"](name)
        all_vms = {}
        for vm_name in infos.keys():
            all_vms[vm_name] = {
                "graphics_type": infos[vm_name].get("graphics", {}).get("type", None),
            }
    except CommandExecutionError:
        # virt.vm_info may fail entirely (e.g. libvirt not running)
        all_vms = {}

    # Find out which VM is managed by a cluster
    try:
        crm_status = ElementTree.fromstring(
            subprocess.Popen(
                ["crm_mon", "-1", "--output-as", "xml"],
                stdout=subprocess.PIPE,
            ).communicate()[0]
        )
        resource_states = {}
        for resource in crm_status.findall(
            ".//resources/resource[@resource_agent='ocf::heartbeat:VirtualDomain']"
        ):
            resource_states[resource.get("id")] = resource.get("active") == "true"
        crm_conf = ElementTree.fromstring(
            subprocess.Popen(
                ["crm", "configure", "show", "xml", "type:primitive"],
                stdout=subprocess.PIPE,
            ).communicate()[0]
        )
        for primitive in crm_conf.findall(".//primitive[@type='VirtualDomain']"):
            config_node = primitive.find(".//nvpair[@name='config']")
            if config_node is None:
                continue
            path = config_node.get("value")
            if path is None:
                continue
            desc = ElementTree.parse(path)
            name_node = desc.find("./name")
            if name_node is None:
                # A definition without a <name> can't be matched to anything.
                # The previous `A and B or C` condition crashed here with an
                # AttributeError when the resource was inactive.
                continue
            # Resources present in the configuration but missing from the
            # crm_mon status are treated as not running (avoids a KeyError).
            active = resource_states.get(primitive.get("id"), False)
            # Provide infos on VMs managed by the cluster running on this node
            # or not running at all
            if name_node.text in all_vms or not active:
                if name_node.text not in all_vms:
                    all_vms[name_node.text] = {}
                vm_data = all_vms[name_node.text]
                vm_data["cluster_primitive"] = primitive.get("id")
                vm_data["definition_path"] = path
                # Provide the UUID if possible since this will allow matching the VM with the DB record
                uuid_node = desc.find("uuid")
                if uuid_node is not None:
                    vm_data["uuid"] = uuid_node.text

                # Report CPU and Memory since we may not have them in the database
                vcpu_node = desc.find("vcpu")
                if vcpu_node is not None and vcpu_node.text is not None:
                    vm_data["vcpus"] = int(vcpu_node.text)
                mem_node = desc.find("./memory")
                if mem_node is not None and mem_node.text is not None:
                    vm_data["memory"] = _convert_unit(int(mem_node.text), mem_node.get("unit", "KiB"))

                graphics_node = desc.find(".//devices/graphics")
                if graphics_node is not None:
                    vm_data["graphics_type"] = graphics_node.get("type")

            # No need to parse more XML files if we already had the one we're
            # looking for. The original compared the Element object itself to
            # the name string, which was always False (dead early-exit).
            if name is not None and name_node.text == name:
                break
    except OSError as err:
        log.debug("Failed to get cluster configuration: %s", err)

    return all_vms


def host_info():
    """
    Provide a few informations on the virtualization host for the UI to use.
    """
    other_nodes = []
    try:
        # Name of the cluster node we are running on
        current_node = (
            subprocess.Popen(["crm_node", "-n"], stdout=subprocess.PIPE)
            .communicate()[0]
            .strip()
            .decode()
        )
        nodes_conf = ElementTree.fromstring(
            subprocess.Popen(
                ["crm", "configure", "show", "xml", "type:node"],
                stdout=subprocess.PIPE,
            ).communicate()[0]
        )
        # All cluster members except ourselves
        for node in nodes_conf.findall(".//node"):
            uname = node.get("uname")
            if uname != current_node:
                other_nodes.append(uname)
    except OSError as err:
        log.debug("Failed to get cluster configuration: %s", err)

    return {
        "hypervisor": __salt__["virt.get_hypervisor"](),
        "cluster_other_nodes": other_nodes,
    }


def _convert_unit(value, unit):
    '''
    Convert a size with unit into MiB
    '''
    dec = False
    if re.match(r"[kmgtpezy]b$", unit.lower()):
        dec = True
    elif not re.match(r"(b|[kmgtpezy](ib)?)$", unit.lower()):
        return None
    power = "bkmgtpezy".index(unit.lower()[0])
    return int(value * (10 ** (power * 3) if dec else 2 ** (power * 10)) / (1024 ** 2))


def vm_definition(uuid):
    """
    Get the result of virt.vm_info and the XML definition in one shot from the UUID.
    Assumes the regular form of UUID with the dashes, not the one from the DB

    :param uuid: the VM UUID in dashed form
    :return: dict with "definition" (and "info" when the VM is defined in
        libvirt), or an empty dict when the VM can't be found at all
    """
    cnx = None
    try:
        cnx = libvirt.open()
        domain = cnx.lookupByUUIDString(uuid)
        name = domain.name()
        return {
            "definition": __salt__["virt.get_xml"](name),
            "info": __salt__["virt.vm_info"](name)[name],
        }
    except libvirt.libvirtError:
        # The VM is not defined in libvirt, may be it is defined in the cluster
        try:
            crm_conf = ElementTree.fromstring(
                subprocess.Popen(
                    ["crm", "configure", "show", "xml", "type:primitive"],
                    stdout=subprocess.PIPE,
                ).communicate()[0]
            )
            for primitive in crm_conf.findall(".//primitive[@type='VirtualDomain']"):
                config_node = primitive.find(".//nvpair[@name='config']")
                if config_node is None:
                    continue
                config_path = config_node.get("value")
                if config_path is None:
                    continue
                with open(config_path, 'r') as desc_fd:
                    desc_content = desc_fd.read()
                desc = ElementTree.fromstring(desc_content)
                uuid_node = desc.find("./uuid")
                if uuid_node is not None and uuid_node.text == uuid:
                    return {"definition": desc_content}
        except OSError:
            # May be this is not a cluster node
            pass
        return {}
    finally:
        # Always release the libvirt connection. The original code only closed
        # it on the error path, leaking the connection whenever the domain was
        # found in libvirt.
        if cnx:
            cnx.close()


def virt_tuner_templates():
    """
    Return the sorted virt-tuner template names, or an empty list when the
    virt_tuner module could not be imported.
    """
    if not virt_tuner:
        return []
    return sorted(virt_tuner.templates)


def domain_parameters(cpu, mem, template):
    """
    Return the VM parameters, overlaying the values computed by the named
    virt-tuner template when it is available.
    """
    parameters = {"cpu": cpu, "mem": mem}
    if virt_tuner and template in virt_tuner.templates:
        # The template function computes tuned values for this host
        parameters.update(virt_tuner.templates[template].function())
    return parameters
 070701000000C5000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001B00000000susemanager-sls/src/states    070701000000C6000081B400000000000000000000000163F87E3000000000000000000000000000000000000000000000002700000000susemanager-sls/src/states/__init__.py    070701000000C7000081B400000000000000000000000163F87E300000091F000000000000000000000000000000000000002800000000susemanager-sls/src/states/mgrcompat.py   # -*- coding: utf-8 -*-
'''
SUSE Manager custom wrapper for Salt "module.run" state module.

This wrapper determines the syntax to use for calling the Salt "module.run" state
that has changed between different Salt version.

Using this wrapper we ensure all SUSE Manager SLS files are using the same syntax
regardless the actual Salt version installed on the minion.

'''
from __future__ import absolute_import

# Import salt libs
from salt.utils.odict import OrderedDict

import logging

log = logging.getLogger(__name__)

__virtualname__ = 'mgrcompat'


def __virtual__():
    '''
    Always load this wrapper; it merely forwards to the 'module.run' state.
    '''
    return __virtualname__

def _tailor_kwargs_to_new_syntax(name, **kwargs):
    nkwargs = {}
    _opt_kwargs = None
    for k, v in kwargs.items():
        if k.startswith("m_"):
            nkwargs[k[2:]] = v
        elif k == 'kwargs':
            _opt_kwargs = kwargs[k]
        else:
            nkwargs[k] = v
    ret = {name: [OrderedDict(nkwargs)]}
    if _opt_kwargs:
        ret[name].append(OrderedDict(_opt_kwargs))
    return ret

def module_run(**kwargs):
    '''
    Execute the Salt "module.run" state, passing the arguments in the right
    way according to the syntax supported by the Salt minion version and
    configuration.

    :param kwargs: classic-syntax "module.run" parameters, including "name"
    :return: the "module.run" state return dict, normalized back to the
        classic shape when the new syntax was used
    '''

    # We use classic "module.run" syntax by default.
    use_new_syntax = False

    if 2016 < __grains__['saltversioninfo'][0] < 3005 and 'module.run' in __opts__.get('use_superseded', []):
        # New syntax - explicitely enabled via 'use_superseded' configuration on 2018.3, 2019.2, 3000.x, 3002.x, 3003.x and 3004.x
        use_new_syntax = True

    if use_new_syntax:
        log.debug("Minion is using the new syntax for 'module.run' state. Tailoring parameters.")
        # Lazy %s formatting: the message is only built when debug is enabled
        log.debug("Old parameters: %s", kwargs)
        old_name = kwargs.pop('name')
        new_kwargs = _tailor_kwargs_to_new_syntax(old_name, **kwargs)
        log.debug("New parameters for 'module.run' state: %s", new_kwargs)
    else:
        new_kwargs = kwargs

    ret = __states__['module.run'](**new_kwargs)
    if use_new_syntax:
        # Fold the new-syntax changes (keyed by function name) back into the
        # classic {'ret': ...} shape. Guard the key lookup to avoid a
        # KeyError when the changes are not keyed by the function name.
        if ret['changes'] and old_name in ret['changes']:
            ret['changes']['ret'] = ret['changes'].pop(old_name)
        ret['name'] = old_name
    return ret
 070701000000C8000081B400000000000000000000000163F87E300000040E000000000000000000000000000000000000002700000000susemanager-sls/src/states/mgrutils.py    """
Utility states
"""

from salt.exceptions import CommandExecutionError
from salt.states import file


__virtualname__ = 'mgrutils'


def __virtual__():
    '''
    Always load, after wiring the borrowed salt.states.file module up with
    this module's execution context.
    '''
    # salt.states.file is imported directly instead of going through the Salt
    # loader, so the loader-injected globals must be forwarded by hand.
    for dunder, value in (
        ("__salt__", __salt__),
        ("__opts__", __opts__),
        ("__pillar__", __pillar__),
        ("__grains__", __grains__),
        ("__context__", __context__),
        ("__utils__", __utils__),
    ):
        setattr(file, dunder, value)
    return __virtualname__


def cmd_dump(name, cmd):
    """
    Dump the output of a command to a file

    :param name: path of the file receiving the output
    :param cmd: the command to run (executed without a shell)
    :return: standard Salt state return dict
    """
    try:
        output = __salt__['cmd.run'](cmd, raise_err=True, python_shell=False)
    except CommandExecutionError:
        return {
            "name": name,
            "changes": {},
            "result": False,
            "comment": "Failed to run command {}".format(cmd),
        }

    # Delegate the actual file handling (diff, test mode, ...) to file.managed
    file_ret = __states__["file.managed"](name, contents=output)
    file_ret["name"] = name
    return file_ret

  070701000000C9000081B400000000000000000000000163F87E3000000BD0000000000000000000000000000000000000002600000000susemanager-sls/src/states/product.py '''
Handles installation of SUSE products using zypper

Only supported with :mod:`zypper <salt.modules.zypper>`
'''

import logging

from salt.utils.versions import version_cmp
from salt.exceptions import CommandExecutionError

log = logging.getLogger(__name__)

__virtualname__ = 'product'

def __virtual__():
    '''
    Only work on SUSE platforms with zypper
    '''
    if __grains__.get('os_family', '') != 'Suse':
        return (False, "Module product: non SUSE OS not supported")

    # Not all versions of SUSE use zypper, check that it is available
    try:
        zypper_version = __salt__['pkg.info_installed']('zypper')['zypper']['version']
    except CommandExecutionError:
        return (False, "Module product: zypper package manager not found")

    # 'zypper search --provides' needs at least zypper 1.8.13
    if version_cmp(zypper_version, '1.8.13') < 0:
        return (False, "Module product: zypper 1.8.13 or greater required")
    return __virtualname__


def _get_missing_products(refresh):
    """
    Compute the subscribed products that still have to be installed.

    :param refresh: force a metadata refresh before searching
    :return: list of product package names to install, or None when the
        initial search produced no results
    """
    # Search for not installed products
    try:
        not_installed = list(__salt__['pkg.search'](
            'product()',
            refresh=refresh,
            match='exact',
            provides=True,
            not_installed_only=True
        ))

        log.debug("The following products are not yet installed: %s", ', '.join(not_installed))

    except CommandExecutionError:
        # No search results
        return None

    # Exclude products that are already provided by another to prevent conflicts
    to_install = []
    for product_pkg in not_installed:
        try:
            providers = list(__salt__['pkg.search'](
                product_pkg,
                match='exact',
                provides=True
            ))

            if product_pkg in providers:
                providers.remove(product_pkg)
            if providers:
                log.debug("The product '%s' is already provided by '%s'. Skipping.", product_pkg, ', '.join(providers))
            else:
                # No other providers than the package itself
                to_install.append(product_pkg)

        except CommandExecutionError:
            # No search results
            # Not provided by any installed package, add it to the list
            to_install.append(product_pkg)

    return to_install

def all_installed(name, refresh=False, **kwargs):
    '''
    Ensure that all the subscribed products are installed.

    refresh
        force a refresh if set to True.
        If set to False (default) it depends on zypper if a refresh is
        executed.
    '''
    missing = _get_missing_products(refresh)

    if missing:
        # Install every missing product package, without recommended packages
        return __states__['pkg.installed'](name, pkgs=missing, no_recommends=True)

    # All product packages are already installed
    log.debug("All products are already installed. Nothing to do.")
    return {
        'name': name,
        'changes': {},
        'result': True,
        'comment': "All subscribed products are already installed",
    }
070701000000CA000081B400000000000000000000000163F87E300000F24C000000000000000000000000000000000000002B00000000susemanager-sls/src/states/uyuni_config.py    import logging
from typing import Optional, Dict, Any, List, Tuple
from collections import Counter

SERVER_GROUP_NOT_FOUND_ERROR = 2201
NO_SUCH_USER_ERROR = -213
ORG_NOT_FOUND_ERROR = 2850
ACTIVATION_KEY_NOT_FOUND_ERROR = -212
AUTHENTICATION_ERROR = 2950

log = logging.getLogger(__name__)

__salt__: Dict[str, Any] = {}
__opts__: Dict[str, Any] = {}
__virtualname__ = 'uyuni'


class StateResult:
    """
    Helpers building the standard Salt state return dictionary
    (name / changes / result / comment).
    """

    @staticmethod
    def state_error(name: str, comment: str = None):
        """
        Build a failed state result (result=False, no changes).

        :param name: state name
        :param comment: error description
        """
        return StateResult.prepare_result(name, False, comment)

    @staticmethod
    def prepare_result(name: str, result: Optional[bool], comment: str = None, changes: Optional[Dict] = None):
        """
        Build a state result dictionary.

        :param name: state name
        :param result: True / False / None (None means "would change" in test mode)
        :param comment: human readable description
        :param changes: changes dictionary; None means no changes
        """
        # A fresh dict is created when no changes are given: the previous
        # `changes: Dict = {}` default was a shared mutable object that callers
        # could mutate, leaking changes between invocations.
        return {
            'name': name,
            'changes': changes if changes is not None else {},
            'result': result,
            'comment': comment,
        }


class UyuniUsers:
    """
    Implementation of the Uyuni user management states: create, update and
    delete users together with their roles and system-group assignments.
    All server interaction goes through the 'uyuni' execution module.
    """

    @staticmethod
    def _update_user_roles(name: str,
                           current_roles: List[str] = [],
                           new_roles: List[str] = [],
                           org_admin_user: str = None,
                           org_admin_password: str = None):
        """
        Reconcile a user's roles: remove the roles not present in new_roles,
        then add the roles missing from current_roles.

        :param name: user login ID
        :param current_roles: roles currently assigned to the user
        :param new_roles: desired roles
        :param org_admin_user: organization administrator username
        :param org_admin_password: organization administrator password
        """
        # Remove roles that are no longer desired
        for role_to_remove in (current_roles or []):
            if role_to_remove not in (new_roles or []):
                __salt__['uyuni.user_remove_role'](name, role=role_to_remove,
                                                   org_admin_user=org_admin_user,
                                                   org_admin_password=org_admin_password)

        # Add the newly requested roles
        for role_to_add in (new_roles or []):
            if role_to_add not in (current_roles or []):
                __salt__['uyuni.user_add_role'](name, role=role_to_add,
                                                org_admin_user=org_admin_user,
                                                org_admin_password=org_admin_password)

    @staticmethod
    def _update_user_system_groups(name: str,
                                   current_system_groups: List[str] = [],
                                   system_groups: List[str] = [],
                                   org_admin_user: str = None,
                                   org_admin_password: str = None):
        """
        Reconcile the system groups a user is assigned to: add the missing
        groups and remove the ones no longer desired.

        :param name: user login ID
        :param current_system_groups: groups the user is currently assigned to
        :param system_groups: desired groups
        :param org_admin_user: organization administrator username
        :param org_admin_password: organization administrator password
        """
        systems_groups_add = [sys for sys in (system_groups or []) if sys not in (current_system_groups or [])]
        if systems_groups_add:
            __salt__['uyuni.user_add_assigned_system_groups'](login=name, server_group_names=systems_groups_add,
                                                              org_admin_user=org_admin_user,
                                                              org_admin_password=org_admin_password)

        system_groups_remove = [sys for sys in (current_system_groups or []) if sys not in (system_groups or [])]
        if system_groups_remove:
            __salt__['uyuni.user_remove_assigned_system_groups'](login=name, server_group_names=system_groups_remove,
                                                                 org_admin_user=org_admin_user,
                                                                 org_admin_password=org_admin_password)

    @staticmethod
    def _compute_changes(user_changes: Dict[str, Any],
                         current_user: Dict[str, Any],
                         roles: List[str],
                         current_roles: List[str],
                         system_groups: List[str],
                         current_system_groups: List[str],
                         use_pam_auth: bool = False):
        """
        Compute the Salt 'changes' dictionary between the current and the
        desired user state.

        :param user_changes: desired user attributes (login, password, email, ...)
        :param current_user: current user attributes, or None if the user does not exist
        :param roles: desired roles
        :param current_roles: current roles
        :param system_groups: desired system groups
        :param current_system_groups: current system groups
        :param use_pam_auth: True when the user authenticates through PAM
            (the password check below is skipped in that case)
        :return: tuple (changes dict, error) where error is an exception or None
        """
        changes = {}
        error = None
        # user field changes
        for field in ["email", "first_name", "last_name"]:
            if (current_user or {}).get(field) != user_changes.get(field):
                changes[field] = {"new": user_changes[field]}
                if current_user:
                    changes[field]["old"] = (current_user or {}).get(field)

        # role changes (order-insensitive comparison)
        if Counter(roles or []) != Counter(current_roles or []):
            changes['roles'] = {'new': roles}
            if current_roles:
                changes['roles']['old'] = current_roles

        # system group changes (order-insensitive comparison)
        if Counter(system_groups or []) != Counter(current_system_groups or []):
            changes['system_groups'] = {'new': system_groups}
            if current_system_groups:
                changes['system_groups']['old'] = current_system_groups

        # check if password have changed: try to log in as the target user
        # with the desired password
        if current_user and not use_pam_auth:
            try:
                __salt__['uyuni.user_get_details'](user_changes.get('login'),
                                                   user_changes.get('password'))
            except Exception as exc:
                # check if it's an authentication error. If yes, password have changed
                # NOTE(review): assumes the raised exception is an XMLRPC
                # Fault exposing `faultCode`; any other exception type would
                # raise AttributeError here — confirm client behavior.
                if exc.faultCode == AUTHENTICATION_ERROR:
                    changes["password"] = {"new": "(hidden)", "old": "(hidden)"}
                else:
                    error = exc
        return changes, error

    def manage(self, login: str, password: str, email: str, first_name: str, last_name: str, use_pam_auth: bool = False,
               roles: Optional[List[str]] = [], system_groups: Optional[List[str]] = [],
               org_admin_user: str = None, org_admin_password: str = None) -> Dict[str, Any]:
        """
        Ensure a user is present with all specified properties

        :param login: user login ID
        :param password: desired password for the user
        :param email: valid email address
        :param first_name: First name
        :param last_name: Last name
        :param use_pam_auth: if you wish to use PAM authentication for this user
        :param roles: roles to assign to user
        :param system_groups: system groups to assign user to
        :param org_admin_user: organization administrator username
        :param org_admin_password: organization administrator password
        :return: dict for Salt communication
        """
        current_user = None
        current_roles = None
        current_system_groups_names = None
        try:
            current_user = __salt__['uyuni.user_get_details'](login, org_admin_user=org_admin_user,
                                                              org_admin_password=org_admin_password)
            current_roles = __salt__['uyuni.user_list_roles'](login, org_admin_user=org_admin_user,
                                                              org_admin_password=org_admin_password)
            current_system_groups = __salt__['uyuni.user_list_assigned_system_groups'](login,
                                                                                       org_admin_user=org_admin_user,
                                                                                       org_admin_password=org_admin_password)
            current_system_groups_names = [s["name"] for s in (current_system_groups or [])]
        except Exception as exc:
            # Bad admin credentials are fatal; any other fault (e.g. user not
            # found) falls through with current_user = None, meaning the user
            # will be created below.
            if exc.faultCode == AUTHENTICATION_ERROR:
                error_message = "Error while retrieving user information (admin credentials error) '{}': {}".format(
                    login, exc)
                log.warning(error_message)
                return StateResult.state_error(login, comment=error_message)

        user_paramters = {"login": login, "password": password, "email": email,
                          "first_name": first_name, "last_name": last_name,
                          "org_admin_user": org_admin_user, "org_admin_password": org_admin_password}

        changes, error = self._compute_changes(user_paramters, current_user,
                                               roles, current_roles,
                                               system_groups, current_system_groups_names,
                                               use_pam_auth=use_pam_auth)

        if error:
            return StateResult.state_error(login, "Error computing changes for user '{}': {}".format(login, error))
        if not changes:
            return StateResult.prepare_result(login, True, "{0} is already in the desired state".format(login))
        if not current_user:
            # The user does not exist yet: report login/password as new
            changes['login'] = {"new": login}
            changes['password'] = {"new": "(hidden)"}
        if __opts__['test']:
            return StateResult.prepare_result(login, None, "{0} would be modified".format(login), changes)

        try:
            if current_user:
                __salt__['uyuni.user_set_details'](**user_paramters)
            else:
                user_paramters["use_pam_auth"] = use_pam_auth
                __salt__['uyuni.user_create'](**user_paramters)

            self._update_user_roles(login, current_roles, roles,
                                    org_admin_user, org_admin_password)
            self._update_user_system_groups(login, current_system_groups_names, system_groups,
                                            org_admin_user, org_admin_password)
        except Exception as exc:
            return StateResult.state_error(login, "Error modifying user '{}': {}".format(login, exc))
        else:
            return StateResult.prepare_result(login, True, "{0} user successfully modified".format(login), changes)

    def delete(self, login: str, org_admin_user: str = None, org_admin_password: str = None) -> Dict[str, Any]:
        """
        Remove an Uyuni user

        :param org_admin_user: organization administrator username
        :param org_admin_password: organization administrator password
        :param login: login of the user

        :return: dict for Salt communication
        """
        try:
            user = __salt__['uyuni.user_get_details'](login, org_admin_user=org_admin_user,
                                                      org_admin_password=org_admin_password)
        except Exception as exc:
            # NOTE(review): assumes an XMLRPC Fault exposing `faultCode` —
            # other exception types would raise AttributeError; confirm.
            if exc.faultCode == NO_SUCH_USER_ERROR:
                return StateResult.prepare_result(login, True, "{0} is already absent".format(login))
            if exc.faultCode == AUTHENTICATION_ERROR:
                return StateResult.state_error(login,
                                               "Error deleting user (organization credentials error) '{}': {}".format(
                                                   login, exc))
            raise exc
        else:
            changes = {
                'login': {'old': login},
                'email': {'old': user.get('email')},
                'first_name': {'old': user.get('first_name')},
                'last_name': {'old': user.get('last_name')}
            }
            if __opts__['test']:
                return StateResult.prepare_result(login, None, "{0} would be deleted".format(login), changes)

            try:
                __salt__['uyuni.user_delete'](login,
                                              org_admin_user=org_admin_user,
                                              org_admin_password=org_admin_password)
                return StateResult.prepare_result(login, True, "User {} has been deleted".format(login), changes)
            except Exception as exc:
                return StateResult.state_error(login, "Error deleting user '{}': {}".format(login, exc))


class UyuniUserChannels:
    """
    Implementation of the state managing which software channels a user can
    manage and/or subscribe to.
    """

    @staticmethod
    def process_changes(current_managed_channels: Optional[List[str]],
                        new_managed_channels: Optional[List[str]],
                        current_subscribe_channels: List[str],
                        new_subscribe_channels: List[str],
                        org_admin_user: str, org_admin_password: str) -> Dict[str, Dict[str, bool]]:
        """
        Compute the channel permission changes to apply.

        :param current_managed_channels: channels the user can currently manage
        :param new_managed_channels: channels the user should be able to manage
        :param current_subscribe_channels: channels the user is currently able to subscribe to
        :param new_subscribe_channels: channels the user should be able to subscribe to
        :param org_admin_user: organization administrator username
        :param org_admin_password: organization administrator password
        :return: dict with optional 'manageable_channels' and
            'subscribable_channels' keys, each mapping a channel label to
            True (grant) or False (revoke)
        """
        # Manageable channels: grant the new ones, revoke the removed ones
        managed_changes: Dict[str, bool] = {}
        managed_changes.update({new_ma: True for new_ma in (new_managed_channels or [])
                                if new_ma not in current_managed_channels})

        managed_changes.update({old_ma: False for old_ma in (current_managed_channels or [])
                                if old_ma not in new_managed_channels})

        subscribe_changes: Dict[str, bool] = {}
        # Grant subscribe access for newly requested channels, and also for
        # channels whose manage access is being revoked in this run —
        # presumably because manageable channels are implicitly subscribable,
        # so the subscribe flag has to be set explicitly once manage access
        # goes away (TODO confirm against the XMLRPC API semantics).
        for new_channel in (new_subscribe_channels or []):
            if new_channel not in (current_subscribe_channels or []) or not managed_changes.get(new_channel, True):
                subscribe_changes[new_channel] = True

        # Revoke subscribe access for channels no longer wanted, unless the
        # channel is globally subscribable anyway (revoking would be a no-op).
        for curr_channel in (current_subscribe_channels or []):
            if not (curr_channel in new_subscribe_channels or curr_channel in new_managed_channels):
                if not __salt__['uyuni.channel_software_is_globally_subscribable'](curr_channel,
                                                                                   org_admin_user,
                                                                                   org_admin_password):
                    subscribe_changes[curr_channel] = False
        changes = {}
        if managed_changes:
            changes['manageable_channels'] = managed_changes
        if subscribe_changes:
            changes['subscribable_channels'] = subscribe_changes
        return changes

    def manage(self, login: str, password: str,
               manageable_channels: Optional[List[str]] = [],
               subscribable_channels: Optional[List[str]] = [],
               org_admin_user: str = None, org_admin_password: str = None) -> Dict[str, Any]:
        """
        Modifies user-channel associations

        :param login: user login ID
        :param password: user password
        :param manageable_channels: channels user can manage
        :param subscribable_channels: channels user can subscribe
        :param org_admin_user: organization administrator username
        :param org_admin_password: organization administrator password
        :return: dict for Salt communication
        """
        try:
            # Current permissions are read with the target user's own credentials
            current_roles = __salt__['uyuni.user_list_roles'](login, password=password)
            current_manageable_channels = __salt__['uyuni.channel_list_manageable_channels'](login, password)
            current_subscribe_channels = __salt__['uyuni.channel_list_my_channels'](login, password)
        except Exception as exc:
            return StateResult.state_error(login,
                                           comment="Error retrieving information about user channels '{}': {}".format(
                                               login, exc))

        # Admin-like users implicitly have access to every channel: per-channel
        # flags would have no effect, so refuse to manage them.
        if "org_admin" in current_roles or "channel_admin" in current_roles:
            return StateResult.state_error(login, "Channels access cannot be changed, because "
                                                  "the target user can manage all channels in the organization "
                                                  "(having an \"org_admin\" or \"channel_admin\" role).")

        current_manageable_channels_list = [c.get("label") for c in (current_manageable_channels or [])]
        current_subscribe_channels_list = [c.get("label") for c in (current_subscribe_channels or [])]

        changes = self.process_changes(current_manageable_channels_list,
                                       manageable_channels,
                                       current_subscribe_channels_list, subscribable_channels,
                                       org_admin_user, org_admin_password)

        if not changes:
            return StateResult.prepare_result(login, True,
                                              "{0} channels are already in the desired state".format(login))
        if __opts__['test']:
            return StateResult.prepare_result(login, None, "{0} channels would be configured".format(login), changes)

        try:
            # Apply manage grants/revocations first, then subscribe ones
            for channel, action in changes.get('manageable_channels', {}).items():
                __salt__['uyuni.channel_software_set_user_manageable'](channel, login, action,
                                                                       org_admin_user, org_admin_password)

            for channel, action in changes.get('subscribable_channels', {}).items():
                __salt__['uyuni.channel_software_set_user_subscribable'](channel, login, action,
                                                                         org_admin_user, org_admin_password)
        except Exception as exc:
            return StateResult.state_error(login, "Error changing channel assignments '{}': {}".format(login, exc))
        return StateResult.prepare_result(login, True, "Channel set to the desired state", changes)


class UyuniGroups:
    """
    Salt state class managing Uyuni system groups: ensures a group exists
    with the desired description and member systems, or is absent.
    """

    @staticmethod
    def _update_systems(name: str, new_systems: List[int], current_systems: List[int],
                        org_admin_user: str = None, org_admin_password: str = None):
        """
        Reconcile group membership: remove systems no longer selected and add
        newly selected ones.

        :param name: group name
        :param new_systems: desired list of system ids
        :param current_systems: list of system ids currently in the group
        :param org_admin_user: organization administrator username
        :param org_admin_password: organization administrator password
        """
        remove_systems = [sys for sys in current_systems if sys not in new_systems]
        if remove_systems:
            __salt__['uyuni.systemgroup_add_remove_systems'](name, False, remove_systems,
                                                             org_admin_user=org_admin_user,
                                                             org_admin_password=org_admin_password)

        add_systems = [sys for sys in new_systems if sys not in current_systems]
        if add_systems:
            __salt__['uyuni.systemgroup_add_remove_systems'](name, True, add_systems,
                                                             org_admin_user=org_admin_user,
                                                             org_admin_password=org_admin_password)

    @staticmethod
    def _get_systems_for_group(target: str, target_type: str = "glob",
                               org_admin_user: str = None, org_admin_password: str = None):
        """
        Resolve a minion target expression to Uyuni system ids.
        Minions matching the target that are not registered in Uyuni are
        silently skipped.

        :param target: target expression used to select minions
        :param target_type: Salt targeting type (glob, grain, pillar, ...)
        :param org_admin_user: organization administrator username
        :param org_admin_password: organization administrator password

        :return: list of Uyuni system ids for the matched minions
        """
        selected_minions = __salt__['uyuni.master_select_minions'](target, target_type)
        available_system_ids = __salt__['uyuni.systems_get_minion_id_map'](org_admin_user, org_admin_password)

        return [
            available_system_ids[minion_id] for minion_id in selected_minions.get('minions', [])
            if minion_id in available_system_ids
        ]

    def manage(self, name: str, description: str, target: str, target_type: str = "glob",
               org_admin_user: str = None, org_admin_password: str = None) -> Dict[str, Any]:
        """
        Create or update a system group

        :param name: group name
        :param description: group description
        :param target: target expression used to filter which minions should be part of the group
        :param target_type: target type, one of the following: glob, grain, grain_pcre, pillar, pillar_pcre,
                pillar_exact, compound, compound_pillar_exact. Default: glob.
        :param org_admin_user: organization administrator username
        :param org_admin_password: organization administrator password

        :return: dict for Salt communication
        """
        current_group = None
        current_systems = None
        try:
            current_group = __salt__['uyuni.systemgroup_get_details'](name,
                                                                      org_admin_user=org_admin_user,
                                                                      org_admin_password=org_admin_password)
            current_systems = __salt__['uyuni.systemgroup_list_systems'](name,
                                                                         org_admin_user=org_admin_user,
                                                                         org_admin_password=org_admin_password)
        except Exception as exc:
            # NOTE(review): assumes exc is an XML-RPC Fault carrying faultCode;
            # other exception types would raise AttributeError here — confirm
            # against the uyuni execution module's error contract.
            if exc.faultCode != SERVER_GROUP_NOT_FOUND_ERROR:
                return StateResult.state_error(name,
                                               "Error retrieving information about system group '{}': {}".format(name,
                                                                                                                 exc))

        current_systems_ids = [sys['id'] for sys in (current_systems or [])]
        systems_to_group = self._get_systems_for_group(target, target_type,
                                                       org_admin_user=org_admin_user,
                                                       org_admin_password=org_admin_password)

        changes = {}
        if description != (current_group or {}).get('description'):
            changes['description'] = {'new': description}
            if current_group:
                changes['description']['old'] = current_group["description"]

        # membership comparison ignores order but respects multiplicity
        if Counter(current_systems_ids or []) != Counter(systems_to_group or []):
            changes['systems'] = {'new': systems_to_group}
            if current_group:
                changes['systems']['old'] = current_systems_ids

        if not changes:
            return StateResult.prepare_result(name, True, "{0} is already in the desired state".format(name))

        if not current_group:
            changes["name"] = {"new": name}

        if __opts__['test']:
            return StateResult.prepare_result(name, None, "{0} would be updated".format(name), changes)

        try:
            if current_group:
                __salt__['uyuni.systemgroup_update'](name, description,
                                                     org_admin_user=org_admin_user,
                                                     org_admin_password=org_admin_password)
            else:
                __salt__['uyuni.systemgroup_create'](name, description,
                                                     org_admin_user=org_admin_user,
                                                     org_admin_password=org_admin_password)
            # membership reconciliation is identical for create and update,
            # so it is done once after either call
            self._update_systems(name,
                                 systems_to_group,
                                 current_systems_ids,
                                 org_admin_user=org_admin_user,
                                 org_admin_password=org_admin_password)
        except Exception as exc:
            return StateResult.state_error(name, "Error updating group. '{}': {}".format(name, exc))
        else:
            return StateResult.prepare_result(name, True, "{0} successfully updated".format(name), changes)

    def delete(self, name: str, org_admin_user: str = None, org_admin_password: str = None) -> Dict[str, Any]:
        """
        Remove an Uyuni system group

        :param name: Group Name
        :param org_admin_user: organization administrator username
        :param org_admin_password: organization administrator password

        :return: dict for Salt communication
        """
        try:
            current_group = __salt__['uyuni.systemgroup_get_details'](name,
                                                                      org_admin_user=org_admin_user,
                                                                      org_admin_password=org_admin_password)
        except Exception as exc:
            if exc.faultCode == SERVER_GROUP_NOT_FOUND_ERROR:
                return StateResult.prepare_result(name, True, "{0} is already absent".format(name))
            if exc.faultCode == AUTHENTICATION_ERROR:
                return StateResult.state_error(name,
                                               "Error deleting group (organization admin credentials error) '{}': {}"
                                               .format(name, exc))
            # unexpected fault codes are propagated to the caller
            raise exc
        else:
            if __opts__['test']:
                return StateResult.prepare_result(name, None, "{0} would be removed".format(name))
            try:
                __salt__['uyuni.systemgroup_delete'](name,
                                                     org_admin_user=org_admin_user,
                                                     org_admin_password=org_admin_password)
                return StateResult.prepare_result(name, True, "Group {} has been deleted".format(name),
                                                  {'name': {'old': current_group.get('name')},
                                                   'description': {'old': current_group.get('description')}})
            except Exception as exc:
                return StateResult.state_error(name, "Error deleting group '{}': {}".format(name, exc))


class UyuniOrgs:
    """
    Salt state class managing Uyuni organizations and their admin user:
    create/update an organization, or remove it entirely.
    """

    @staticmethod
    def _compute_changes(user_changes: Dict[str, Any],
                         current_user: Dict[str, Any]) -> Dict[str, Any]:
        """
        Diff the desired admin-user attributes against the current ones.

        :param user_changes: desired attribute values
        :param current_user: current user details (falsy when the user does not exist)
        :return: per-field change dict in Salt 'new'/'old' layout
        """
        changes = {}
        existing = current_user or {}
        for field in ["email", "first_name", "last_name"]:
            if existing.get(field) != user_changes.get(field):
                entry = {"new": user_changes[field]}
                if current_user:
                    entry["old"] = existing.get(field)
                changes[field] = entry
        return changes

    def manage(self, name: str, org_admin_user: str, org_admin_password: str, first_name: str,
               last_name: str, email: str, pam: bool = False,
               admin_user=None, admin_password=None) -> Dict[str, Any]:
        """
        Create or update an Uyuni organization.
        Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

        :param name: organization name
        :param org_admin_user: organization admin user
        :param org_admin_password: organization admin password
        :param first_name: organization admin first name
        :param last_name: organization admin last name
        :param email: organization admin email
        :param pam: organization admin pam authentication
        :param admin_user: uyuni admin user
        :param admin_password: uyuni admin password
        :return: dict for Salt communication
        """
        current_org = None
        current_org_admin = None
        try:
            current_org = __salt__['uyuni.org_get_details'](name,
                                                            admin_user=admin_user,
                                                            admin_password=admin_password)
            current_org_admin = __salt__['uyuni.user_get_details'](org_admin_user,
                                                                   org_admin_user=org_admin_user,
                                                                   org_admin_password=org_admin_password)
        except Exception as exc:
            if exc.faultCode != ORG_NOT_FOUND_ERROR:
                return StateResult.state_error(name,
                                               "Error retrieving information about organization '{}': {}".format(name,
                                                                                                                 exc))

        user_params = {
            "login": org_admin_user,
            "password": org_admin_password,
            "email": email,
            "first_name": first_name,
            "last_name": last_name,
            "org_admin_user": org_admin_user,
            "org_admin_password": org_admin_password,
        }

        changes = self._compute_changes(user_params, current_org_admin)
        if not current_org:
            # creation also records the org-level attributes as new
            changes["org_name"] = {"new": name}
            changes["org_admin_user"] = {"new": org_admin_user}
            changes["pam"] = {"new": pam}

        if not changes:
            return StateResult.prepare_result(name, True, "{0} is already in the desired state".format(name))
        if __opts__['test']:
            return StateResult.prepare_result(name, None, "{0} would be updated".format(name), changes)

        try:
            if current_org:
                # org exists: only the admin user's details need syncing
                __salt__['uyuni.user_set_details'](**user_params)
            else:
                __salt__['uyuni.org_create'](name=name,
                                             org_admin_user=org_admin_user, org_admin_password=org_admin_password,
                                             first_name=first_name, last_name=last_name, email=email,
                                             admin_user=admin_user, admin_password=admin_password, pam=pam)
        except Exception as exc:
            return StateResult.state_error(name, "Error updating organization '{}': {}".format(name, exc))
        return StateResult.prepare_result(name, True, "{0} org successfully modified".format(name), changes)

    def delete(self, name: str, admin_user=None, admin_password=None) -> Dict[str, Any]:
        """
        Remove an Uyuni organization
        Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

        :param name: Organization Name
        :param admin_user: administrator username
        :param admin_password: administrator password

        :return: dict for Salt communication
        """
        try:
            current_org = __salt__['uyuni.org_get_details'](name,
                                                            admin_user=admin_user,
                                                            admin_password=admin_password)
        except Exception as exc:
            if exc.faultCode == ORG_NOT_FOUND_ERROR:
                return StateResult.prepare_result(name, True, "{0} is already absent".format(name))
            if exc.faultCode == AUTHENTICATION_ERROR:
                return StateResult.state_error(name,
                                               "Error deleting organization (admin credentials error) '{}': {}"
                                               .format(name, exc))
            raise exc

        if __opts__['test']:
            return StateResult.prepare_result(name, None, "{0} would be removed".format(name))
        try:
            __salt__['uyuni.org_delete'](name,
                                         admin_user=admin_user,
                                         admin_password=admin_password)
        except Exception as exc:
            return StateResult.state_error(name, "Error deleting Org '{}': {}".format(name, exc))
        return StateResult.prepare_result(name, True, "Org {} has been deleted".format(name),
                                          {'name': {'old': current_org.get('name')}})


class UyuniOrgsTrust:
    """
    Salt state class managing trust relationships between Uyuni organizations.
    """

    def trust(self, name: str, org_name: str, trusted_orgs: List[str],
              admin_user: str = None, admin_password: str = None) -> Dict[str, Any]:
        """
        Establish trust relationships between organizations

        :param name: state name
        :param org_name: organization name
        :param trusted_orgs: list of organization names to trust
        :param admin_user: administrator username
        :param admin_password: administrator password

        :return: dict for Salt communication
        """
        try:
            current_org_trusts = __salt__['uyuni.org_trust_list_trusts'](org_name,
                                                                         admin_user=admin_user,
                                                                         admin_password=admin_password)
            current_org = __salt__['uyuni.org_get_details'](org_name,
                                                            admin_user=admin_user, admin_password=admin_password)
        except Exception as exc:
            # message previously read "...organization trust'{}'" (missing space)
            return StateResult.state_error(name,
                                           "Error retrieving information about an organization trust '{}': {}".format(
                                               org_name, exc))

        # set for O(1) membership tests instead of rebuilding the list per trust
        desired_trusts = set(trusted_orgs or [])
        trusts_to_add = []
        trusts_to_remove = []
        for org_trust in current_org_trusts:
            is_desired = org_trust.get("orgName") in desired_trusts
            is_enabled = org_trust.get("trustEnabled")
            if is_desired and not is_enabled:
                trusts_to_add.append(org_trust)
            elif not is_desired and is_enabled:
                trusts_to_remove.append(org_trust)

        if not trusts_to_add and not trusts_to_remove:
            return StateResult.prepare_result(name, True, "{0} is already in the desired state".format(org_name))
        if __opts__['test']:
            changes = {}
            for org_add in trusts_to_add:
                changes[org_add.get("orgName")] = {'old': None, 'new': True}
            for org_remove in trusts_to_remove:
                changes[org_remove.get("orgName")] = {'old': True, 'new': None}
            return StateResult.prepare_result(name, None, "{0} would be created".format(org_name), changes)

        # record each successful change so a partial failure still reports
        # what was actually applied
        processed_changes = {}
        try:
            for org_add in trusts_to_add:
                __salt__['uyuni.org_trust_add_trust'](current_org.get("id"), org_add.get("orgId"),
                                                      admin_user=admin_user, admin_password=admin_password)
                processed_changes[org_add.get("orgName")] = {'old': None, 'new': True}
            for org_remove in trusts_to_remove:
                __salt__['uyuni.org_trust_remove_trust'](current_org.get("id"), org_remove.get("orgId"),
                                                         admin_user=admin_user, admin_password=admin_password)
                processed_changes[org_remove.get("orgName")] = {'old': True, 'new': None}
        except Exception as exc:
            return StateResult.prepare_result(name, False,
                                              "Error updating organization trusts '{}': {}".format(org_name, exc),
                                              processed_changes)
        return StateResult.prepare_result(name, True, "Org '{}' trusts successfully modified".format(org_name),
                                          processed_changes)


class UyuniActivationKeys:

    @staticmethod
    def _normalize_list_packages(list_packages: [Any]):
        return [(f['name'], f.get('arch', None)) for f in (list_packages or [])]

    @staticmethod
    def _compute_changes(ak_parameters: Dict[str, Any],
                         current_ak: Dict[str, Any],
                         configure_after_registration: bool,
                         current_configure_after_registration: bool,
                         current_config_channels: List[str],
                         configuration_channels: List[str]) -> Dict[str, Any]:
        """
        Compute the Salt 'new'/'old' changes dict between the desired
        activation key settings and the currently stored ones.

        :param ak_parameters: desired activation key attributes
        :param current_ak: current activation key attributes ({} when the key
                           does not exist yet)
        :param configure_after_registration: desired config-deployment flag
        :param current_configure_after_registration: current config-deployment
               flag (None when the key does not exist — presumably; confirm
               against the caller)
        :param current_config_channels: currently assigned configuration
               channel labels (order-significant)
        :param configuration_channels: desired configuration channel labels
        :return: dict of changed fields, each mapping to {'new': ..., 'old': ...}
                 ('old' omitted when there is no existing key)
        """
        changes = {}
        # scalar fields: plain inequality comparison
        for field in ["description", 'base_channel', 'usage_limit', 'universal_default', 'contact_method']:
            if current_ak.get(field) != ak_parameters.get(field):
                changes[field] = {"new": ak_parameters[field]}
                if current_ak:
                    changes[field]["old"] = current_ak.get(field)

        # list fields
        # compared sorted, so only membership differences count as a change
        for field in ['system_types', 'child_channels', 'server_groups']:
            if sorted((ak_parameters or {}).get(field) or []) != sorted(current_ak.get(field) or []):
                changes[field] = {"new": ak_parameters[field]}
                if current_ak:
                    changes[field]["old"] = current_ak.get(field)

        # packages are normalized to (name, arch) tuples before comparison,
        # but the change dict keeps the original dict representation
        new_packages = UyuniActivationKeys._normalize_list_packages((ak_parameters or {}).get('packages', []))
        old_packages = UyuniActivationKeys._normalize_list_packages((current_ak or {}).get('packages', []))
        if sorted(new_packages) != sorted(old_packages):
            changes['packages'] = {"new": ak_parameters['packages']}
            if current_ak:
                changes['packages']["old"] = current_ak.get('packages')

        # None signals "no existing key", so 'old' is only recorded when a
        # current value was actually retrieved
        if configure_after_registration != current_configure_after_registration:
            changes['configure_after_registration'] = {"new": configure_after_registration}
            if current_configure_after_registration is not None:
                changes['configure_after_registration']["old"] = current_configure_after_registration

        # we don't want to sort configuration channels since the order matters in this case
        if (current_config_channels or []) != (configuration_channels or []):
            changes['configuration_channels'] = {"new": configuration_channels}
            if current_config_channels:
                changes['configuration_channels']['old'] = current_config_channels

        return changes

    @staticmethod
    def _update_system_type(current_system_types, new_system_types,
                            key, org_admin_user, org_admin_password):
        """
        Sync the system types (entitlements) of an activation key: add the
        missing entitlements, then remove the obsolete ones.
        """
        missing = [entitlement for entitlement in new_system_types
                   if entitlement not in current_system_types]
        if missing:
            __salt__['uyuni.activation_key_add_entitlements'](key, missing,
                                                              org_admin_user=org_admin_user,
                                                              org_admin_password=org_admin_password)

        obsolete = [entitlement for entitlement in current_system_types
                    if entitlement not in new_system_types]
        if obsolete:
            __salt__['uyuni.activation_key_remove_entitlements'](key, obsolete,
                                                                 org_admin_user=org_admin_user,
                                                                 org_admin_password=org_admin_password)

    @staticmethod
    def _update_child_channels(current_child_channels, new_child_channels,
                               key, org_admin_user, org_admin_password):
        """
        Sync the child channels of an activation key: add the missing
        channels, then remove the ones no longer desired.
        """
        missing = [channel for channel in new_child_channels
                   if channel not in current_child_channels]
        if missing:
            __salt__['uyuni.activation_key_add_child_channels'](key, missing,
                                                                org_admin_user=org_admin_user,
                                                                org_admin_password=org_admin_password)

        obsolete = [channel for channel in current_child_channels
                    if channel not in new_child_channels]
        if obsolete:
            __salt__['uyuni.activation_key_remove_child_channels'](key, obsolete,
                                                                   org_admin_user=org_admin_user,
                                                                   org_admin_password=org_admin_password)

    @staticmethod
    def _update_server_groups(current_server_groups, new_server_groups,
                              key, org_admin_user, org_admin_password):
        """
        Sync the server groups assigned to an activation key: add the missing
        groups, then remove the ones no longer desired.
        """
        missing = [group for group in new_server_groups
                   if group not in current_server_groups]
        if missing:
            __salt__['uyuni.activation_key_add_server_groups'](key, missing,
                                                               org_admin_user=org_admin_user,
                                                               org_admin_password=org_admin_password)

        obsolete = [group for group in current_server_groups
                    if group not in new_server_groups]
        if obsolete:
            __salt__['uyuni.activation_key_remove_server_groups'](key, obsolete,
                                                                  org_admin_user=org_admin_user,
                                                                  org_admin_password=org_admin_password)



    @staticmethod
    def _format_packages_data(packages):
        return [{'name': f[0], **(({'arch': f[1]}) if f[1] else {})} for f in packages]

    @staticmethod
    def _update_packages(current_packages, new_packages, key, org_admin_user, org_admin_password):
        """
        Sync the packages assigned to an activation key with the desired list.
        Packages are compared as normalized (name, arch) tuples so dict layout
        does not affect the diff.

        Removed two stray ``pass`` statements that were dead leftovers before
        the API calls.

        :param current_packages: package dicts currently assigned to the key
        :param new_packages: desired package dicts
        :param key: activation key identifier
        :param org_admin_user: organization administrator username
        :param org_admin_password: organization administrator password
        """
        new_packages_normalized = UyuniActivationKeys._normalize_list_packages(new_packages)
        current_packages_normalized = UyuniActivationKeys._normalize_list_packages(current_packages)

        add_packages = [t for t in new_packages_normalized if t not in current_packages_normalized]
        if add_packages:
            __salt__['uyuni.activation_key_add_packages'](key,
                                                          UyuniActivationKeys._format_packages_data(add_packages),
                                                          org_admin_user=org_admin_user,
                                                          org_admin_password=org_admin_password)

        remove_packages = [t for t in current_packages_normalized if t not in new_packages_normalized]
        if remove_packages:
            __salt__['uyuni.activation_key_remove_packages'](key,
                                                             UyuniActivationKeys._format_packages_data(remove_packages),
                                                             org_admin_user=org_admin_user,
                                                             org_admin_password=org_admin_password)

    def manage(self, name: str, description: str,
               base_channel: str = '',
               usage_limit: int = 0,
               contact_method: str = 'default',
               system_types: List[str] = [],
               universal_default: bool = False,
               child_channels: List[str] = [],
               configuration_channels: List[str] = [],
               packages: List[str] = [],
               server_groups: List[str] = [],
               configure_after_registration: bool = False,
               org_admin_user: str = None, org_admin_password: str = None) -> Dict[str, Any]:
        """
        Ensure an Uyuni Activation Key is present.

        :param name: the Activation Key name
        :param description: the Activation description
        :param base_channel: base channel to be used
        :param usage_limit: activation key usage limit
        :param contact_method: contact method to be used. Can be one of: 'default', 'ssh-push' or 'ssh-push-tunnel'
        :param system_types: system types to be assigned.
                             Can be one of: 'virtualization_host', 'container_build_host',
                             'monitoring_entitled', 'osimage_build_host', 'virtualization_host'
        :param universal_default: sets this activation key as organization universal default
        :param child_channels: list of child channels to be assigned
        :param configuration_channels: list of configuration channels to be assigned
        :param packages: list of packages which will be installed
        :param server_groups: list of server groups to assign the activation key with
        :param configure_after_registration: deploy configuration files to systems on registration
        :param org_admin_user: organization administrator username
        :param org_admin_password: organization administrator password

        :return:  dict for Salt communication
        """
        current_ak = {}
        key = None
        current_configure_after_registration = None
        system_groups_keys = {}
        current_config_channels = []
        output_field_names = {
            'description': 'description',
            'base_channel_label': 'base_channel',
            'usage_limit': 'usage_limit',
            'universal_default': 'universal_default',
            'contact_method': 'contact_method',
            'entitlements': 'system_types',
            'child_channel_labels': 'child_channels',
            'server_group_ids': 'server_groups',
            'packages': 'packages'
        }
        try:
            all_groups = __salt__['uyuni.systemgroup_list_all_groups'](org_admin_user, org_admin_password)
            group_id_to_name = {}
            for g in (all_groups or []):
                system_groups_keys[g.get('name')] = g.get('id')
                group_id_to_name[g.get('id')] = g.get('name')

            current_org_user = __salt__['uyuni.user_get_details'](org_admin_user, org_admin_password)

            key = "{}-{}".format(current_org_user['org_id'], name)
            returned_ak = __salt__['uyuni.activation_key_get_details'](key, org_admin_user=org_admin_user,
                                                                       org_admin_password=org_admin_password)

            for returned_name, output_name in output_field_names.items():
                current_ak[output_name] = returned_ak[returned_name]

            current_ak['server_groups'] = [group_id_to_name[s] for s in (current_ak['server_groups'] or [])]

            if current_ak.get('base_channel', None) == 'none':
                current_ak['base_channel'] = ''

            current_configure_after_registration = __salt__['uyuni.activation_key_check_config_deployment'](key,
                                                                                                            org_admin_user,
                                                                                                            org_admin_password)

            config_channels_output = __salt__['uyuni.activation_key_list_config_channels'](key,
                                                                                            org_admin_user,
                                                                                            org_admin_password)
            current_config_channels = [cc['label'] for cc in (config_channels_output or [])]

        except Exception as exc:
            if exc.faultCode != ACTIVATION_KEY_NOT_FOUND_ERROR:
                return StateResult.state_error(key, "Error retrieving information about Activation Key '{}': {}".format(key, exc))

        ak_paramters = {'description': description,
                        'base_channel': base_channel,
                        'usage_limit': usage_limit,
                        'contact_method': contact_method,
                        'system_types': system_types,
                        'universal_default': universal_default,
                        'child_channels': child_channels,
                        'server_groups': server_groups,
                        'packages': packages}

        changes = self._compute_changes(ak_paramters, current_ak,
                                        configure_after_registration,
                                        current_configure_after_registration,
                                        current_config_channels,
                                        configuration_channels)

        if not current_ak:
            changes["key"] = {"new": key}

        if not changes:
            return StateResult.prepare_result(key, True, "{0} is already in the desired state".format(key))
        if __opts__['test']:
            return StateResult.prepare_result(key, None, "{0} would be updated".format(key), changes)

        try:
            if current_ak:
                __salt__['uyuni.activation_key_set_details'](key,
                                                             description=description,
                                                             contact_method=contact_method,
                                                             base_channel_label=base_channel,
                                                             usage_limit=usage_limit,
                                                             universal_default=universal_default,
                                                             org_admin_user=org_admin_user,
                                                             org_admin_password=org_admin_password)

                if changes.get('system_types', False):
                    self._update_system_type(current_ak.get('system_types', []), system_types or [],
                                             key, org_admin_user, org_admin_password)

            else:
                __salt__['uyuni.activation_key_create'](key=name,
                                                        description=description,
                                                        base_channel_label=base_channel,
                                                        usage_limit=usage_limit,
                                                        system_types=system_types,
                                                        universal_default=universal_default,
                                                        org_admin_user=org_admin_user,
                                                        org_admin_password=org_admin_password)

                __salt__['uyuni.activation_key_set_details'](key, contact_method=contact_method,
                                                             usage_limit=usage_limit,
                                                             org_admin_user=org_admin_user,
                                                             org_admin_password=org_admin_password)

            if changes.get('child_channels', False):
                self._update_child_channels(current_ak.get('child_channels', []),
                                            child_channels or [],
                                            key, org_admin_user, org_admin_password)

            if changes.get('server_groups', False):
                old_server_groups_id = [system_groups_keys[s] for s in current_ak.get('server_groups', [])]
                new_server_groups_id = [system_groups_keys[s] for s in (server_groups or [])]
                self._update_server_groups(old_server_groups_id,
                                           new_server_groups_id,
                                           key, org_admin_user, org_admin_password)

            if changes.get('configure_after_registration', False):
                if configure_after_registration:
                    __salt__['uyuni.activation_key_enable_config_deployment'](key,
                                                                              org_admin_user=org_admin_user,
                                                                              org_admin_password=org_admin_password)
                else:
                    if current_ak:
                        __salt__['uyuni.activation_key_disable_config_deployment'](key,
                                                                                   org_admin_user=org_admin_user,
                                                                                   org_admin_password=org_admin_password)

            if changes.get('packages', False):
                self._update_packages(current_ak.get('packages', []),
                                           packages or [],
                                            key, org_admin_user, org_admin_password)

            if changes.get('configuration_channels', False):
                __salt__['uyuni.activation_key_set_config_channels']([key],
                                                                     config_channel_label=configuration_channels,
                                                                     org_admin_user=org_admin_user,
                                                                     org_admin_password=org_admin_password)

        except Exception as exc:
            return StateResult.state_error(key, "Error updating activation key '{}': {}".format(key, exc))
        else:
            return StateResult.prepare_result(key, True, "{0} activation key successfully modified".format(key), changes)

    def delete(self, name: str, org_admin_user: str = None, org_admin_password: str = None) -> Dict[str, Any]:
        """
        Remove an Uyuni Activation Key.

        :param name: the Activation Key Name
        :param org_admin_user: organization administrator username
        :param org_admin_password: organization administrator password

        :return: dict for Salt communication
        """
        # Pre-initialize so the except-branches below never reference an
        # unbound variable when the user lookup itself fails.
        key = name
        try:
            current_org_user = __salt__['uyuni.user_get_details'](org_admin_user, org_admin_password)
            # Activation keys are namespaced by organization id.
            key = "{}-{}".format(current_org_user['org_id'], name)
            # Existence check only; raises with ACTIVATION_KEY_NOT_FOUND_ERROR
            # when the key is absent.
            __salt__['uyuni.activation_key_get_details'](key, org_admin_user=org_admin_user,
                                                         org_admin_password=org_admin_password)
        except Exception as exc:
            # Non-XMLRPC exceptions have no faultCode; getattr keeps them from
            # turning into an AttributeError and lets them propagate as-is.
            fault_code = getattr(exc, 'faultCode', None)
            if fault_code == ACTIVATION_KEY_NOT_FOUND_ERROR:
                return StateResult.prepare_result(name, True, "{0} is already absent".format(key))
            if fault_code == AUTHENTICATION_ERROR:
                return StateResult.state_error(name,
                                               "Error deleting Activation Key (organization credentials error) '{}': {}"
                                               .format(key, exc))
            raise exc
        else:
            changes = {
                'id': {'old': key},
            }
            if __opts__['test']:
                return StateResult.prepare_result(name, None, "{0} would be deleted".format(key), changes)

            try:
                __salt__['uyuni.activation_key_delete'](key,
                                                        org_admin_user=org_admin_user,
                                                        org_admin_password=org_admin_password)
                return StateResult.prepare_result(name, True, "Activation Key {} has been deleted".format(key), changes)
            except Exception as exc:
                return StateResult.state_error(name, "Error deleting Activation Key '{}': {}".format(key, exc))


def __virtual__():
    """
    Salt loader hook: expose this state module under ``__virtualname__``.
    """
    return __virtualname__


def user_present(name, password, email, first_name, last_name, use_pam_auth=False,
                 roles=None, system_groups=None,
                 org_admin_user=None, org_admin_password=None):
    """
    Ensure an Uyuni user exists with the given attributes.

    :param name: user login name
    :param password: desired password for the user
    :param email: valid email address
    :param first_name: first name
    :param last_name: last name
    :param use_pam_auth: whether the user should authenticate through PAM
    :param roles: roles to assign to the user
    :param system_groups: system groups to assign to the user
    :param org_admin_user: organization administrator username
    :param org_admin_password: organization administrator password

    :return: dict for Salt communication
    """
    # All the heavy lifting lives in the UyuniUsers handler.
    users = UyuniUsers()
    return users.manage(name, password, email, first_name, last_name,
                        use_pam_auth, roles, system_groups,
                        org_admin_user, org_admin_password)


def user_channels(name, password,
                  manageable_channels=None, subscribable_channels=None,
                  org_admin_user=None, org_admin_password=None):
    """
    Ensure a user has access to the specified channels

    :param name: user login name
    :param password: user password
    :param manageable_channels: channels user can manage (defaults to none)
    :param subscribable_channels: channels user can subscribe (defaults to none)
    :param org_admin_user: organization administrator username
    :param org_admin_password: organization administrator password

    :return: dict for Salt communication
    """
    # Use None sentinels instead of mutable [] defaults (classic Python
    # pitfall: a shared list default persists between calls). Callers that
    # omitted the arguments still get an empty list, as before.
    if manageable_channels is None:
        manageable_channels = []
    if subscribable_channels is None:
        subscribable_channels = []
    return UyuniUserChannels().manage(name, password,
                                      manageable_channels, subscribable_channels,
                                      org_admin_user, org_admin_password)


def user_absent(name, org_admin_user=None, org_admin_password=None):
    """
    Ensure an Uyuni user does not exist.

    :param name: user login name
    :param org_admin_user: organization administrator username
    :param org_admin_password: organization administrator password

    :return: dict for Salt communication
    """
    handler = UyuniUsers()
    return handler.delete(name, org_admin_user, org_admin_password)


def org_present(name, org_admin_user, org_admin_password,
                first_name, last_name, email, pam=False,
                admin_user=None, admin_password=None):
    """
    Ensure an Uyuni organization exists with the given administrator account.
    Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

    :param name: organization name
    :param org_admin_user: organization admin user
    :param org_admin_password: organization admin password
    :param first_name: organization admin first name
    :param last_name: organization admin last name
    :param email: organization admin email
    :param pam: organization admin pam authentication
    :param admin_user: uyuni admin user
    :param admin_password: uyuni admin password

    :return: dict for Salt communication
    """
    orgs = UyuniOrgs()
    return orgs.manage(
        name, org_admin_user, org_admin_password,
        first_name, last_name, email, pam,
        admin_user, admin_password)


def org_absent(name, admin_user=None, admin_password=None):
    """
    Ensure an Uyuni organization does not exist.
    Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

    :param name: organization name
    :param admin_user: uyuni admin user
    :param admin_password: uyuni admin password

    :return: dict for Salt communication
    """
    handler = UyuniOrgs()
    return handler.delete(name, admin_user, admin_password)


def org_trust(name, org_name, trusts, admin_user=None, admin_password=None):
    """
    Establish trust relationships between Uyuni organizations.

    :param name: state name
    :param org_name: Organization name
    :param trusts: list of organization names to trust
    :param admin_user: administrator username
    :param admin_password: administrator password

    :return: dict for Salt communication
    """
    handler = UyuniOrgsTrust()
    return handler.trust(name, org_name, trusts, admin_user, admin_password)


def group_present(name, description, target=None, target_type="glob",
                  org_admin_user=None, org_admin_password=None):
    """
    Ensure an Uyuni system group exists with the given description and target.

    :param name: group name
    :param description: group description
    :param target: target expression used to filter which minions should be part of the group
    :param target_type: target type, one of the following: glob, grain, grain_pcre, pillar, pillar_pcre,
            pillar_exact, compound, compound_pillar_exact. Default: glob.
    :param org_admin_user: organization administrator username
    :param org_admin_password: organization administrator password

    :return: dict for Salt communication
    """
    groups = UyuniGroups()
    return groups.manage(name, description, target, target_type,
                         org_admin_user, org_admin_password)


def group_absent(name, org_admin_user=None, org_admin_password=None):
    """
    Ensure an Uyuni system group does not exist.

    :param name: Group Name
    :param org_admin_user: organization administrator username
    :param org_admin_password: organization administrator password

    :return: dict for Salt communication
    """
    handler = UyuniGroups()
    return handler.delete(name, org_admin_user, org_admin_password)


def activation_key_absent(name, org_admin_user=None, org_admin_password=None):
    """
    Ensure an Uyuni Activation Key does not exist.

    :param name: the Activation Key name
    :param org_admin_user: organization administrator username
    :param org_admin_password: organization administrator password

    :return: dict for Salt communication
    """
    handler = UyuniActivationKeys()
    return handler.delete(name, org_admin_user, org_admin_password)


def activation_key_present(name,
                           description,
                           base_channel='',
                           usage_limit=0,
                           contact_method='default',
                           system_types=None,
                           universal_default=False,
                           child_channels=None,
                           configuration_channels=None,
                           packages=None,
                           server_groups=None,
                           configure_after_registration=False,
                           org_admin_user=None, org_admin_password=None):
    """
    Ensure an Uyuni Activation Key is present.

    :param name: the Activation Key name
    :param description: the Activation description
    :param base_channel: base channel to be used
    :param usage_limit: activation key usage limit. Default value is 0, which means unlimited usage
    :param contact_method: contact method to be used. Can be one of: 'default', 'ssh-push' or 'ssh-push-tunnel'
    :param system_types: system types to be assigned.
                         Can be one of: 'virtualization_host', 'container_build_host',
                         'monitoring_entitled', 'osimage_build_host', 'virtualization_host'
    :param universal_default: sets this activation key as organization universal default
    :param child_channels: list of child channels to be assigned
    :param configuration_channels: list of configuration channels to be assigned
    :param packages: list of packages which will be installed
    :param server_groups: list of server groups to assign the activation key with
    :param configure_after_registration: deploy configuration files to systems on registration
    :param org_admin_user: organization administrator username
    :param org_admin_password: organization administrator password

    :return:  dict for Salt communication
    """
    # Replace mutable [] defaults with None sentinels (a shared list default
    # persists across calls in Python). Omitted arguments still become empty
    # lists before delegation, so behavior for existing callers is unchanged.
    return UyuniActivationKeys().manage(name, description,
                                        base_channel=base_channel,
                                        usage_limit=usage_limit,
                                        contact_method=contact_method,
                                        system_types=system_types if system_types is not None else [],
                                        universal_default=universal_default,
                                        child_channels=child_channels if child_channels is not None else [],
                                        configuration_channels=configuration_channels if configuration_channels is not None else [],
                                        packages=packages if packages is not None else [],
                                        server_groups=server_groups if server_groups is not None else [],
                                        configure_after_registration=configure_after_registration,
                                        org_admin_user=org_admin_user,
                                        org_admin_password=org_admin_password)
070701000000CB000081B400000000000000000000000163F87E300000180A000000000000000000000000000000000000002900000000susemanager-sls/src/states/virt_utils.py  """
virt utility functions
"""

import logging
import os
import re

from salt.exceptions import CommandExecutionError
try:
    import libvirt
except ImportError:
    pass

log = logging.getLogger(__name__)

__virtualname__ = "virt_utils"


def __virtual__():
    """
    Load this module only when the virt execution module is available.
    """
    if "virt.vm_info" not in __salt__:
        return (False, "Module virt_utils: virt module can't be loaded")
    return __virtualname__


def _all_running(name, kind, names, is_running):
    """
    Shared helper: make sure every named virt object of the given kind is
    running, starting the stopped ones.

    :param name: state id used in the returned dict
    :param kind: virt object kind ("network" or "pool"); used to build the
        ``virt.<kind>_info`` / ``virt.<kind>_start`` module names
    :param names: object names to check
    :param is_running: predicate deciding, from one info entry, whether the
        object is already running
    :return: standard Salt state result dict
    """
    result = {
        "name": name,
        "changes": {},
        "result": None if __opts__["test"] else True,
        "comment": "",
    }

    try:
        info = __salt__["virt.{}_info".format(kind)]()
        # Objects absent from the info dict are undefined; the rest are
        # classified by the is_running predicate.
        missing = [item for item in names if not info.get(item)]
        stopped = [item for item in names if info.get(item) and not is_running(info[item])]

        if missing:
            result["result"] = False
            result["comment"] = "{} {}{} not defined".format(
                ", ".join(missing), kind, "s are" if len(missing) > 1 else " is"
            )
            return result

        if not stopped:
            result["comment"] = "all {}s are already running".format(kind)
            return result

        for item in stopped:
            # In test mode only report what would change.
            if not __opts__["test"]:
                __salt__["virt.{}_start".format(kind)](item)
            result["changes"][item] = "started"

        result["comment"] = "{} {}{} been started".format(
            ", ".join(stopped), kind, "s have" if len(stopped) > 1 else " has"
        )

    except Exception as err:
        result["result"] = False
        result["comment"] = str(err)

    return result


def network_running(name, networks=None):
    """
    Ensure one or more already defined virtual networks are running.

    :param name: the name of one network to get running
    :param networks: the list of network names to get running
    """
    targets = networks if networks else [name]
    # A network is considered running when its info reports it as active.
    return _all_running(name, "network", targets, lambda info: info.get("active", False))


def pool_running(name, pools=None):
    """
    Ensure one or more already defined virtual storage pool are running,
    then refresh the pools that were already up.

    :param name: the name of one pool to get running
    :param pools: the list of pool names to get running
    """
    pool_names = pools if pools else [name]
    result = _all_running(name, "pool", pool_names, lambda info: info["state"] == "running")
    if result["result"] is False:
        return result

    # Refresh every pool that was not just started.
    for pool_name in pool_names:
        if pool_name in result["changes"]:
            # No need to refresh a pool that has just been started.
            continue
        try:
            if not __opts__["test"]:
                __salt__["virt.pool_refresh"](pool_name)
            result["changes"][pool_name] = "refreshed"

        except Exception as err:
            result["result"] = False
            result["comment"] = str(err)

    return result


def vm_resources_running(name):
    """
    Ensure the virtual networks and storage pools used by a VM are running.

    :param name: name of the VM for which to ensure networks and storage pools are running

    :return: standard Salt state result dict; ``changes`` holds two sub-dicts,
        ``networks`` and ``pools``, with the per-object changes
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True if not __opts__["test"] else None,
        "comment": "",
    }
    try:
        infos = __salt__["virt.vm_info"](name)
        if not infos.get(name):
            ret["result"] = False
            ret["comment"] = "Virtual machine {} does not exist".format(name)
            return ret

        vm_infos = infos.get(name)

        # Ensure all the networks are started
        # NOTE(review): assumes every nic of type "network" carries a
        # source/network entry — confirm against virt.vm_info output.
        networks = [
            nic["source"]["network"]
            for nic in vm_infos.get("nics", {}).values()
            if nic["type"] == "network"
        ]
        net_ret = network_running(name="{}_nets".format(name), networks=networks)

        # Ensure all the pools are started
        # Disks whose file path looks like "<pool>/<volume>" (no leading '/'
        # or scheme) are pool-backed; the pool name is the first path segment.
        pools = [
            disk["file"].split("/")[0]
            for disk in vm_infos.get("disks", {}).values()
            if re.match("^[^/:]+/", disk["file"])
        ]
        pool_ret = pool_running(name="{}_pools".format(name), pools=pools)

        # Fail if either sub-state failed; otherwise keep the network result
        # (None in test mode, True otherwise).
        failed = any([net_ret["result"] is False, pool_ret["result"] is False])
        ret["result"] = False if failed else net_ret["result"]
        ret["comment"] = "{}, {}".format(net_ret["comment"], pool_ret["comment"])
        ret["changes"] = {"networks": net_ret["changes"], "pools": pool_ret["changes"]}

    except Exception as err:
        ret["result"] = False
        ret["comment"] = str(err)

    return ret


def cluster_vm_removed(name, primitive, definition_path):
    """
    Delete a VM managed by a cluster.

    :param name: name of the virtual machine to remove
    :param primitive: name of the crm cluster resource managing the VM
    :param definition_path: path to the saved libvirt XML definition of the VM

    :return: standard Salt state result dict
    """
    ret = {
        'name': name,
        'changes': {},
        'result': False,
        'comment': '',
    }
    persistent = False
    active = False
    cnx = None
    try:
        cnx = libvirt.open()
        domain = cnx.lookupByName(name)
        persistent = domain.isPersistent()
        active = bool(domain.isActive())
    except libvirt.libvirtError:
        # Since we expect a non-null primitive, this means the VM is stopped
        pass
    finally:
        # Always release the libvirt connection; the previous version leaked it.
        if cnx is not None:
            try:
                cnx.close()
            except libvirt.libvirtError:
                pass

    # Ensure we still have the VM defined after it is stopped
    if not persistent:
        __salt__['virt.define_xml_path'](definition_path)

    # Ask the cluster to stop the resource
    if active:
        try:
            __salt__['cmd.run']('crm resource stop ' + primitive, raise_err=True, python_shell=False)
        except CommandExecutionError:
            ret['comment'] = 'Failed to stop cluster resource ' + primitive
            return ret

    # Delete the VM
    if not __salt__['virt.purge'](name):
        ret['comment'] = 'Failed to remove the virtual machine and its files'
        return ret

    # Remove the cluster resource
    try:
        __salt__['cmd.run']('crm configure delete ' + primitive, python_shell=False)
    except CommandExecutionError:
        ret['comment'] = 'Failed to remove cluster resource ' + primitive
        return ret

    os.remove(definition_path)

    ret['changes'] = {"removed": name}
    ret['result'] = True
    return ret
  070701000000CC000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001A00000000susemanager-sls/src/tests 070701000000CD000081B400000000000000000000000163F87E30000000A8000000000000000000000000000000000000002400000000susemanager-sls/src/tests/README.md   ## Running tests

Run the tests from _this_ directory. PyTest must be installed.
To run the tests, issue the following command:

  py.test <ENTER>

That's all for now.
070701000000CE000081B400000000000000000000000163F87E3000000000000000000000000000000000000000000000002600000000susemanager-sls/src/tests/__init__.py 070701000000CF000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001F00000000susemanager-sls/src/tests/data    070701000000D0000081B400000000000000000000000163F87E30000000ED000000000000000000000000000000000000003600000000susemanager-sls/src/tests/data/cpuinfo.ppc64le.sample processor	: 0
cpu		: POWER8E (raw), altivec supported
clock		: 3425.000000MHz
revision	: 2.1 (pvr 004b 0201)

timebase	: 512000000
platform	: pSeries
model		: IBM pSeries (emulated by qemu)
machine		: CHRP IBM pSeries (emulated by qemu)
   070701000000D1000081B400000000000000000000000163F87E3000000303000000000000000000000000000000000000003300000000susemanager-sls/src/tests/data/cpuinfo.s390.sample    vendor_id       : IBM/S390
# processors    : 1
bogomips per cpu: 2913.00
features : esan3 zarch stfle msa ldisp eimm dfp etf3eh highgprs
cache0          : level=1 type=Data scope=Private size=96K line_size=256 associativity=6
cache1          : level=1 type=Instruction scope=Private size=64K line_size=256 associativity=4
cache2          : level=2 type=Data scope=Private size=1024K line_size=256 associativity=8
cache3          : level=2 type=Instruction scope=Private size=1024K line_size=256 associativity=8
cache4          : level=3 type=Unified scope=Shared size=49152K line_size=256 associativity=12
cache5          : level=4 type=Unified scope=Shared size=393216K line_size=256 associativity=24
processor 0: version = FF,  identification = 0F9A27,  machine = 2827
 070701000000D2000081B400000000000000000000000163F87E30000010C4000000000000000000000000000000000000002E00000000susemanager-sls/src/tests/data/cpuinfo.sample processor	: 0
vendor_id	: GenuineIntel
cpu family	: 6
model		: 61
model name	: Intel(R) Core(TM) i7-5600U CPU @ 2.60GHz
stepping	: 4
microcode	: 0x22
cpu MHz		: 1314.117
cache size	: 4096 KB
physical id	: 0
siblings	: 4
core id		: 0
cpu cores	: 2
apicid		: 0
initial apicid	: 0
fpu		: yes
fpu_exception	: yes
cpuid level	: 20
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt dtherm ida arat pln pts
bugs		:
bogomips	: 5187.99
clflush size	: 64
cache_alignment	: 64
address sizes	: 39 bits physical, 48 bits virtual
power management:

processor	: 1
vendor_id	: GenuineIntel
cpu family	: 6
model		: 61
model name	: Intel(R) Core(TM) i7-5600U CPU @ 2.60GHz
stepping	: 4
microcode	: 0x22
cpu MHz		: 2100.109
cache size	: 4096 KB
physical id	: 0
siblings	: 4
core id		: 0
cpu cores	: 2
apicid		: 1
initial apicid	: 1
fpu		: yes
fpu_exception	: yes
cpuid level	: 20
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt dtherm ida arat pln pts
bugs		:
bogomips	: 5187.99
clflush size	: 64
cache_alignment	: 64
address sizes	: 39 bits physical, 48 bits virtual
power management:

processor	: 2
vendor_id	: GenuineIntel
cpu family	: 6
model		: 61
model name	: Intel(R) Core(TM) i7-5600U CPU @ 2.60GHz
stepping	: 4
microcode	: 0x22
cpu MHz		: 1718.742
cache size	: 4096 KB
physical id	: 0
siblings	: 4
core id		: 1
cpu cores	: 2
apicid		: 2
initial apicid	: 2
fpu		: yes
fpu_exception	: yes
cpuid level	: 20
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt dtherm ida arat pln pts
bugs		:
bogomips	: 5187.99
clflush size	: 64
cache_alignment	: 64
address sizes	: 39 bits physical, 48 bits virtual
power management:

processor	: 3
vendor_id	: GenuineIntel
cpu family	: 6
model		: 61
model name	: Intel(R) Core(TM) i7-5600U CPU @ 2.60GHz
stepping	: 4
microcode	: 0x22
cpu MHz		: 2108.335
cache size	: 4096 KB
physical id	: 0
siblings	: 4
core id		: 1
cpu cores	: 2
apicid		: 3
initial apicid	: 3
fpu		: yes
fpu_exception	: yes
cpuid level	: 20
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt dtherm ida arat pln pts
bugs		:
bogomips	: 5187.99
clflush size	: 64
cache_alignment	: 64
address sizes	: 39 bits physical, 48 bits virtual
power management:

070701000000D3000081B400000000000000000000000163F87E30000006CF000000000000000000000000000000000000003000000000susemanager-sls/src/tests/data/dmidecode.sample   # dmidecode 3.0
Getting SMBIOS data from sysfs.
SMBIOS 2.7 present.

Handle 0x0004, DMI type 4, 42 bytes
Processor Information
	Socket Designation: U3E1
	Type: Central Processor
	Family: Core i7
	Manufacturer: Intel(R) Corporation
	ID: D4 06 03 00 FF FB EB BF
	Signature: Type 0, Family 6, Model 61, Stepping 4
	Flags:
		FPU (Floating-point unit on-chip)
		VME (Virtual mode extension)
		DE (Debugging extension)
		PSE (Page size extension)
		TSC (Time stamp counter)
		MSR (Model specific registers)
		PAE (Physical address extension)
		MCE (Machine check exception)
		CX8 (CMPXCHG8 instruction supported)
		APIC (On-chip APIC hardware supported)
		SEP (Fast system call)
		MTRR (Memory type range registers)
		PGE (Page global enable)
		MCA (Machine check architecture)
		CMOV (Conditional move instruction supported)
		PAT (Page attribute table)
		PSE-36 (36-bit page size extension)
		CLFSH (CLFLUSH instruction supported)
		DS (Debug store)
		ACPI (ACPI supported)
		MMX (MMX technology supported)
		FXSR (FXSAVE and FXSTOR instructions supported)
		SSE (Streaming SIMD extensions)
		SSE2 (Streaming SIMD extensions 2)
		SS (Self-snoop)
		HTT (Multi-threading)
		TM (Thermal monitor supported)
		PBE (Pending break enabled)
	Version: Intel(R) Core(TM) i7-5600U CPU @ 2.60GHz
	Voltage: 1.1 V
	External Clock: 100 MHz
	Max Speed: 3600 MHz
	Current Speed: 2600 MHz
	Status: Populated, Enabled
	Upgrade: Socket BGA1168
	L1 Cache Handle: 0x0005
	L2 Cache Handle: 0x0006
	L3 Cache Handle: 0x0007
	Serial Number: None
	Asset Tag: None
	Part Number: None
	Core Count: 2
	Core Enabled: 2
	Thread Count: 4
	Characteristics:
		64-bit capable
		Multi-Core
		Hardware Thread
		Execute Protection
		Enhanced Virtualization
		Power/Performance Control

 070701000000D4000081B400000000000000000000000163F87E30000000D7000000000000000000000000000000000000003500000000susemanager-sls/src/tests/data/livepatching-1.sample  kgraft_patch_1_2_2
    active: 1
    RPM: kgraft-patch-3_12_62-60_64_8-default-1-2.2.x86_64
    CVE: (none - this is an initial kGraft patch)
    bug fixes and enhancements: (none)

kgraft_patch_2_2_1
    active: 0
 070701000000D5000081B400000000000000000000000163F87E30000000CA000000000000000000000000000000000000003500000000susemanager-sls/src/tests/data/livepatching-2.sample  kgraft_patch_1_2_2
    active: 0

kgraft_patch_2_2_1
    active: 1
    RPM: kgraft-patch-3_12_62-60_64_8-default-2-2.1.x86_64
    CVE: CVE-2016-8666 CVE-2016-6480
    bug fixes and enhancements: (none)
  070701000000D6000081B400000000000000000000000163F87E3000000087000000000000000000000000000000000000003600000000susemanager-sls/src/tests/data/lscpu-json.aarch64.out {
    "cpu_model": "Cortex-A72",
    "cpu_vendor": "ARM",
    "cpu_numanodes": "4",
    "cpu_stepping": "r0p2",
    "cpu_cores": "4"
}
 070701000000D7000081B400000000000000000000000163F87E3000000206000000000000000000000000000000000000003900000000susemanager-sls/src/tests/data/lscpu-json.aarch64.sample  Architecture:        aarch64
Byte Order:          Little Endian
CPU(s):              64
On-line CPU(s) list: 0-63
Thread(s) per core:  1
Core(s) per socket:  4
Socket(s):           16
NUMA node(s):        4
Vendor ID:           ARM
Model:               2
Model name:          Cortex-A72
Stepping:            r0p2
BogoMIPS:            100.00
NUMA node0 CPU(s):   0-15
NUMA node1 CPU(s):   16-31
NUMA node2 CPU(s):   32-47
NUMA node3 CPU(s):   48-63
Flags:               fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid
  070701000000D8000081B400000000000000000000000163F87E300000006F000000000000000000000000000000000000003400000000susemanager-sls/src/tests/data/lscpu-json.ppc64.out   {
    "cpu_model": "POWER9 (architected), altivec supported",
    "cpu_numanodes": "1",
    "cpu_cores": "1"
}
 070701000000D9000081B400000000000000000000000163F87E30000004F8000000000000000000000000000000000000003700000000susemanager-sls/src/tests/data/lscpu-json.ppc64.sample    Architecture:            ppc64le
  Byte Order:            Little Endian
CPU(s):                  8
  On-line CPU(s) list:   0-7
Model name:              POWER9 (architected), altivec supported
  Model:                 2.2 (pvr 004e 0202)
  Thread(s) per core:    8
  Core(s) per socket:    1
  Socket(s):             1
  Physical sockets:      2
  Physical chips:        1
  Physical cores/chip:   4
Virtualization features: 
  Hypervisor vendor:     pHyp
  Virtualization type:   para
Caches (sum of all):     
  L1d:                   64 KiB (2 instances)
  L1i:                   64 KiB (2 instances)
NUMA:                    
  NUMA node(s):          1
  NUMA node1 CPU(s):     0-7
Vulnerabilities:         
  Itlb multihit:         Not affected
  L1tf:                  Mitigation; RFI Flush, L1D private per thread
  Mds:                   Not affected
  Meltdown:              Mitigation; RFI Flush, L1D private per thread
  Spec store bypass:     Mitigation; Kernel entry/exit barrier (eieio)
  Spectre v1:            Mitigation; __user pointer sanitization, ori31 speculation barrier enabled
  Spectre v2:            Mitigation; Indirect branch cache disabled, Software link stack flush
  Srbds:                 Not affected
  Tsx async abort:       Not affected
070701000000DA000081B400000000000000000000000163F87E3000000051000000000000000000000000000000000000003300000000susemanager-sls/src/tests/data/lscpu-json.s390.out    {
    "cpu_vendor": "IBM/S390",
    "cpu_numanodes": "1",
    "cpu_cores": "1"
}
   070701000000DB000081B400000000000000000000000163F87E300000064C000000000000000000000000000000000000003600000000susemanager-sls/src/tests/data/lscpu-json.s390.sample Architecture:                    s390x
CPU op-mode(s):                  32-bit, 64-bit
Byte Order:                      Big Endian
CPU(s):                          2
On-line CPU(s) list:             0,1
Thread(s) per core:              1
Core(s) per socket:              1
Socket(s) per book:              1
Book(s) per drawer:              1
Drawer(s):                       2
NUMA node(s):                    1
Vendor ID:                       IBM/S390
Machine type:                    2964
CPU dynamic MHz:                 5000
CPU static MHz:                  5000
BogoMIPS:                        3033.00
Hypervisor:                      z/VM 6.4.0
Hypervisor vendor:               IBM
Virtualization type:             full
Dispatching mode:                horizontal
L1d cache:                       256 KiB
L1i cache:                       192 KiB
L2d cache:                       4 MiB
L2i cache:                       4 MiB
L3 cache:                        64 MiB
L4 cache:                        480 MiB
NUMA node0 CPU(s):               0,1
Vulnerability Itlb multihit:     Not affected
Vulnerability L1tf:              Not affected
Vulnerability Mds:               Not affected
Vulnerability Meltdown:          Not affected
Vulnerability Spec store bypass: Not affected
Vulnerability Spectre v1:        Mitigation; __user pointer sanitization
Vulnerability Spectre v2:        Mitigation; execute trampolines
Vulnerability Srbds:             Not affected
Vulnerability Tsx async abort:   Not affected
Flags:                           esan3 zarch stfle msa ldisp eimm dfp edat etf3eh highgprs te vx sie
070701000000DC000081B400000000000000000000000163F87E30000000AC000000000000000000000000000000000000003500000000susemanager-sls/src/tests/data/lscpu-json.x86_64.out  {
    "cpu_model": "Intel(R) Core(TM) i7-6820HQ CPU @ 2.70GHz",
    "cpu_vendor": "GenuineIntel",
    "cpu_numanodes": "1",
    "cpu_stepping": "3",
    "cpu_cores": "4"
}
070701000000DD000081B400000000000000000000000163F87E30000009B3000000000000000000000000000000000000003800000000susemanager-sls/src/tests/data/lscpu-json.x86_64.sample   Architecture:            x86_64
  CPU op-mode(s):        32-bit, 64-bit
  Address sizes:         39 bits physical, 48 bits virtual
  Byte Order:            Little Endian
CPU(s):                  8
  On-line CPU(s) list:   0-7
Vendor ID:               GenuineIntel
  Model name:            Intel(R) Core(TM) i7-6820HQ CPU @ 2.70GHz
    CPU family:          6
    Model:               94
    Thread(s) per core:  2
    Core(s) per socket:  4
    Socket(s):           1
    Stepping:            3
    CPU max MHz:         3600.0000
    CPU min MHz:         800.0000
    BogoMIPS:            5399.81
    Flags:               fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bt
                         s rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadli
                         ne_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1
                          avx2 smep bmi2 erms invpcid mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
Virtualization features: 
  Virtualization:        VT-x
Caches (sum of all):     
  L1d:                   128 KiB (4 instances)
  L1i:                   128 KiB (4 instances)
  L2:                    1 MiB (4 instances)
  L3:                    8 MiB (1 instance)
NUMA:                    
  NUMA node(s):          1
  NUMA node0 CPU(s):     0-7
Vulnerabilities:         
  Itlb multihit:         KVM: Mitigation: Split huge pages
  L1tf:                  Mitigation; PTE Inversion; VMX conditional cache flushes, SMT vulnerable
  Mds:                   Mitigation; Clear CPU buffers; SMT vulnerable
  Meltdown:              Mitigation; PTI
  Spec store bypass:     Mitigation; Speculative Store Bypass disabled via prctl and seccomp
  Spectre v1:            Mitigation; usercopy/swapgs barriers and __user pointer sanitization
  Spectre v2:            Mitigation; Full generic retpoline, IBPB conditional, IBRS_FW, STIBP conditional, RSB filling
  Srbds:                 Mitigation; Microcode
  Tsx async abort:       Mitigation; TSX disabled
 070701000000DE000081B400000000000000000000000163F87E30000000C7000000000000000000000000000000000000003400000000susemanager-sls/src/tests/data/lscpu.ppc64le.sample   # The following is the parsable format, which can be fed to other
# programs. Each different item in every column has an unique ID
# starting from zero.
# CPU,Core,Socket,Node,,L1d,L1i
0,0,0,0,,0,0

 070701000000DF000081B400000000000000000000000163F87E30000000D1000000000000000000000000000000000000003100000000susemanager-sls/src/tests/data/lscpu.s390.sample  # The following is the parsable format, which can be fed to other
# programs. Each different item in every column has an unique ID
# starting from zero.
# CPU,Core,Socket,Node,,L1d,L1i,L2d,L2i
0,0,0,,,0,0,0,0
   070701000000E0000081B400000000000000000000000163F87E3000000103000000000000000000000000000000000000003300000000susemanager-sls/src/tests/data/lscpu.x86_64.sample    # The following is the parsable format, which can be fed to other
# programs. Each different item in every column has an unique ID
# starting from zero.
# CPU,Core,Socket,Node,,L1d,L1i,L2,L3
0,0,0,0,,0,0,0,0
1,0,0,0,,0,0,0,0
2,1,0,0,,1,1,1,0
3,1,0,0,,1,1,1,0
 070701000000E1000081B400000000000000000000000163F87E30000004D9000000000000000000000000000000000000002B00000000susemanager-sls/src/tests/data/udev.sample    P: /devices/LNXSYSTM:00/LNXPWRBN:00
E: DEVPATH=/devices/LNXSYSTM:00/LNXPWRBN:00
E: DRIVER=button
E: MODALIAS=acpi:LNXPWRBN:
E: SUBSYSTEM=acpi

P: /devices/LNXSYSTM:00/LNXPWRBN:00/input/input2
E: DEVPATH=/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2
E: EV=3
E: ID_FOR_SEAT=input-acpi-LNXPWRBN_00
E: ID_INPUT=1
E: ID_INPUT_KEY=1
E: ID_PATH=acpi-LNXPWRBN:00
E: ID_PATH_TAG=acpi-LNXPWRBN_00
E: KEY=10000000000000 0
E: MODALIAS=input:b0019v0000p0001e0000-e0,1,k74,ramlsfw
E: NAME="Power Button"
E: PHYS="LNXPWRBN/button/input0"
E: PRODUCT=19/0/1/0
E: PROP=0
E: SUBSYSTEM=input
E: TAGS=:seat:
E: USEC_INITIALIZED=2010022

P: /devices/LNXSYSTM:00/LNXPWRBN:00/input/input2/event2
N: input/event2
E: BACKSPACE=guess
E: DEVNAME=/dev/input/event2
E: DEVPATH=/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2/event2
E: ID_INPUT=1
E: ID_INPUT_KEY=1
E: ID_PATH=acpi-LNXPWRBN:00
E: ID_PATH_TAG=acpi-LNXPWRBN_00
E: MAJOR=13
E: MINOR=66
E: SUBSYSTEM=input
E: TAGS=:power-switch:
E: USEC_INITIALIZED=2076101
E: XKBLAYOUT=us
E: XKBMODEL=pc105

P: /devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0
E: DEVPATH=/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0
E: DEVTYPE=scsi_device
E: DRIVER=sd
E: MODALIAS=scsi:t-0x00
E: SUBSYSTEM=scsi
   070701000000E2000081B400000000000000000000000163F87E30000000E7000000000000000000000000000000000000003900000000susemanager-sls/src/tests/data/virt_state-test.initcache  (dp1
S'domain_data'
p2
(dp3
I5
(dp4
S'name'
p5
S'testvm'
p6
sS'virt_type'
p7
S'para_virtualized'
p8
sS'state'
p9
S'running'
p10
sS'vcpus'
p11
I2
sS'memory_size'
p12
S'1024'
p13
sS'uuid'
p14
I5
sssS'expire_time'
p15
L2141506800L
s.
 070701000000E3000081B400000000000000000000000163F87E30000004AD000000000000000000000000000000000000002500000000susemanager-sls/src/tests/mockery.py  import sys
import os
try:
    from cStringIO import StringIO
except ImportError:
    from io import StringIO
from mock import MagicMock


def setup_environment():
    '''
    Mock the environment.
    :return:
    '''
    # Install MagicMock stand-ins for the salt modules the tests import,
    # but only once: skip when a (mocked) 'salt' is already registered.
    if 'salt' not in sys.modules or not isinstance(sys.modules['salt'], MagicMock):
        mocked_modules = (
            'salt',
            'salt.cache',
            'salt.config',
            'salt.utils',
            'salt.utils.versions',
            'salt.utils.odict',
            'salt.utils.minions',
            'salt.utils.network',
            'salt.modules',
            'salt.modules.cmdmod',
            'salt.modules.virt',
            'salt.states',
        )
        for module_name in mocked_modules:
            sys.modules[module_name] = MagicMock()
        # CommandExecutionError is mapped to the built-in Exception so tests
        # can raise and catch it without the real salt package.
        sys.modules['salt.exceptions'] = MagicMock(CommandExecutionError=Exception)


def get_test_data(filename):
    '''
    Read a test data file from the "data" directory relative to the
    current working directory.

    :param filename: name of the file inside the data directory
    :return: file content as a string
    '''
    path = os.path.join(os.path.abspath(''), 'data', filename)
    # Use a context manager so the file handle is closed deterministically;
    # the previous open(...).read() leaked the handle until GC.
    with open(path, 'r') as handle:
        return handle.read()
   070701000000E4000081B400000000000000000000000163F87E3000000A7D000000000000000000000000000000000000003000000000susemanager-sls/src/tests/test_beacon_pkgset.py   """
Author: Bo Maryniuk <bo@suse.de>
"""

from mock import MagicMock, patch

from . import mockery

mockery.setup_environment()

with patch(
    "salt.config.minion_config", return_value={"cachedir": "/var/cache/salt/minion"}
):
    from ..beacons import pkgset


pkgset.__context__ = dict()


@patch.object(pkgset.os.path, "exists", MagicMock(return_value=True))
@patch.object(pkgset, "__context__", {pkgset.__virtualname__: ""})
@patch.object(pkgset, "CACHE", MagicMock())
def test_beacon():
    """
    Test beacon functionality.
    """

    # 'open' mock used as a context manager: read().strip() yields "test",
    # which plays the role of the current cookie content on disk.
    mock_content = MagicMock(
        **{
            "return_value.__enter__.return_value.read.return_value.strip.return_value": "test"
        }
    )

    # Case 1: __context__ has no pkgset data and the cache fetch returns
    # nothing: the cookie is stored, but no event is fired.
    # (These case descriptions were bare string expressions before --
    # pointless statements, pylint W0105 -- converted to comments.)
    with patch.object(pkgset, "open", mock_content), patch.object(
        pkgset, "__context__", {}
    ) as mock_context, patch.object(
        pkgset.CACHE, "fetch", return_value={}
    ), patch.object(
        pkgset.CACHE, "store"
    ) as mock_cache_store:
        data = pkgset.beacon({})
        assert mock_context["pkgset"] == "test"
        assert data == []
        mock_cache_store.assert_called_once()

    # Case 2: __context__ has no pkgset data, the cache contains different
    # data than the cookie: a "changed" event is fired.
    with patch.object(pkgset, "open", mock_content), patch.object(
        pkgset, "__context__", {}
    ) as mock_context, patch.object(
        pkgset.CACHE, "fetch", return_value={"data": "other"}
    ), patch.object(
        pkgset.CACHE, "store"
    ) as mock_cache_store:
        data = pkgset.beacon({})
        assert mock_context["pkgset"] == "test"
        assert data == [{"tag": "changed"}]
        mock_cache_store.assert_called_once()

    # Case 3: __context__ has pkgset data, but it differs from the cookie:
    # a "changed" event is fired and the new cookie is stored.
    with patch.object(pkgset, "open", mock_content), patch.object(
        pkgset, "__context__", {"pkgset": "other"}
    ) as mock_context, patch.object(pkgset.CACHE, "store") as mock_cache_store:
        data = pkgset.beacon({})
        assert mock_context["pkgset"] == "test"
        assert data == [{"tag": "changed"}]
        mock_cache_store.assert_called_once()

    # Case 4: __context__ already matches the cookie: no event is fired and
    # the cache is not written.
    with patch.object(pkgset, "open", mock_content), patch.object(
        pkgset, "__context__", {"pkgset": "test"}
    ) as mock_context, patch.object(pkgset.CACHE, "store") as mock_cache_store:
        data = pkgset.beacon({})
        assert mock_context["pkgset"] == "test"
        assert data == []
        mock_cache_store.assert_not_called()
   070701000000E5000081B400000000000000000000000163F87E3000000DF3000000000000000000000000000000000000003100000000susemanager-sls/src/tests/test_grains_cpuinfo.py  '''
Author: bo@suse.de
'''

import json
import pytest
from mock import MagicMock, patch, mock_open
from . import mockery
mockery.setup_environment()

from ..grains import cpuinfo


def test_total_num_cpus():
    '''
    Test total_num_cpus function.

    :return:
    '''
    # Typical /sys/devices/system/cpu listing: four cpuN entries plus
    # assorted non-CPU entries that the counter has to skip.
    os_listdir = ['cpu0', 'cpu1', 'cpu2', 'cpu3', 'cpufreq', 'cpuidle', 'power', 'modalias',
                  'kernel_max', 'possible', 'online', 'offline', 'isolated', 'uevent',
                  'intel_pstate', 'microcode', 'present']

    with patch('os.path.exists', MagicMock(return_value=True)):
        with patch('os.listdir', MagicMock(return_value=os_listdir)):
            cpus = cpuinfo.total_num_cpus()
            # isinstance is the idiomatic type check (pylint: unidiomatic-typecheck)
            assert isinstance(cpus, dict)
            assert 'total_num_cpus' in cpus
            assert cpus['total_num_cpus'] == 4


def test_cpusockets_dmidecode_count_sockets():
    '''
    Test _dmidecode_count_sockets sub in cpusockets function.

    :return:
    '''

    sample = mockery.get_test_data('dmidecode.sample')
    cpuinfo.log = MagicMock()
    with patch.dict(cpuinfo.__salt__, {'cmd.run_all': MagicMock(return_value={'retcode': 0, 'stdout': sample})}):
        out = cpuinfo._dmidecode_count_sockets([])
        # isinstance is the idiomatic type check (pylint: unidiomatic-typecheck)
        assert isinstance(out, dict)
        assert 'cpusockets' in out
        assert out['cpusockets'] == 1


def test_cpusockets_cpuinfo_count_sockets():
    '''
    Test _cpuinfo_count_sockets sub in cpusockets function.

    :return:
    '''
    cpuinfo.log = MagicMock()
    # cpuinfo parser is not applicable for non-Intel architectures, so should return nothing.
    for sample_name in ['cpuinfo.s390.sample', 'cpuinfo.ppc64le.sample']:
        with patch('os.access', MagicMock(return_value=True)):
            with patch.object(cpuinfo, 'open', mock_open(read_data=mockery.get_test_data(sample_name)), create=True):
                assert cpuinfo._cpuinfo_count_sockets([]) is None

    with patch('os.access', MagicMock(return_value=True)):
        with patch.object(cpuinfo, 'open', mock_open(read_data=mockery.get_test_data('cpuinfo.sample')), create=True):
            out = cpuinfo._cpuinfo_count_sockets([])
            # isinstance is the idiomatic type check (pylint: unidiomatic-typecheck)
            assert isinstance(out, dict)
            assert 'cpusockets' in out
            assert out['cpusockets'] == 1


@pytest.mark.parametrize("arch", ["ppc64le", "s390", "x86_64"])
def test_cpusockets_lscpu_count_sockets(arch):
    '''
    Test _lscpu_count_sockets sub in cpusockets function.

    :return:
    '''
    fn_smpl = 'lscpu.{}.sample'.format(arch)
    cpuinfo.log = MagicMock()
    with patch.dict(cpuinfo.__salt__,
                    {'cmd.run_all': MagicMock(return_value={'retcode': 0,
                                                            'stdout': mockery.get_test_data(fn_smpl)})}):
        out = cpuinfo._lscpu_count_sockets([])
        # isinstance is the idiomatic type check (pylint: unidiomatic-typecheck)
        assert isinstance(out, dict)
        assert 'cpusockets' in out
        assert out['cpusockets'] == 1


@pytest.mark.parametrize("arch", ["x86_64", "aarch64", "s390", "ppc64"])
def test_cpusockets_cpu_data(arch):
    '''
    Test lscpu -J data extraction function.

    :return:
    '''
    cpuinfo.log = MagicMock()
    sample_data = mockery.get_test_data("lscpu-json.{}.sample".format(arch))
    with patch.dict(cpuinfo.__salt__,
                    {'cmd.run_all': MagicMock(return_value={'retcode': 0,
                                                            'stdout': sample_data})}):
        out = cpuinfo.cpu_data()
        # isinstance is the idiomatic type check (pylint: unidiomatic-typecheck)
        assert isinstance(out, dict)
        # Expected grains for each architecture live in lscpu-json.<arch>.out
        expected = json.loads(mockery.get_test_data("lscpu-json.{}.out".format(arch)))
        assert out == expected

 070701000000E6000081B400000000000000000000000163F87E30000008C3000000000000000000000000000000000000002E00000000susemanager-sls/src/tests/test_grains_virt.py import pytest
from mock import MagicMock, patch, Mock
from . import mockery
mockery.setup_environment()

from ..grains import virt


@pytest.mark.parametrize("network", [True, False])
def test_features_network(network):
    """
    Check that the 'enhanced_network' feature flag is reported exactly when
    the mocked virt module exposes a 'network_update' function.
    """
    module_funcs = {}
    if network:
        module_funcs["network_update"] = MagicMock(return_value=True)
    with patch.dict(virt.salt.modules.virt.__dict__, module_funcs):
        assert virt.features()["virt_features"]["enhanced_network"] == network


@pytest.mark.parametrize("cluster, start_resources", [(True, True), (True, False), (False, False)])
def test_features_cluster(cluster, start_resources):
    """
    test the cluster part of the features function
    """
    # Build the crm_resource XML metadata; the optional <parameter> entry
    # controls whether the 'start_resources' capability is advertised.
    param = "" if not start_resources else """
      <parameter name="start_resources">
        <content type="boolean" default="false"/>
      </parameter>"""
    crm_resources = """<?xml version="1.0"?>
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
<resource-agent name="VirtualDomain">
  <parameters>  
    {}
  </parameters>
</resource-agent>
""".format(param)
    # Default (no cluster): both subprocess entry points fail with OSError,
    # emulating missing cluster tooling on the host.
    popen_mock = MagicMock(side_effect=OSError())
    check_call_mock = MagicMock(side_effect=OSError())
    if cluster:
        # Cluster case: Popen yields the crm metadata first, then the
        # libvirtd version banner -- the order of side_effect entries must
        # match the call order inside virt.features().
        popen_mock = MagicMock()
        popen_mock.return_value.communicate.side_effect = [(crm_resources, None), (b"libvirtd (libvirt) 5.1.0\n", None)]
        check_call_mock = MagicMock(return_value = 0)

    with patch.object(virt.subprocess, "check_call", check_call_mock):
        with patch.object(virt.subprocess, "Popen", popen_mock):
            actual = virt.features()["virt_features"]
            assert actual["cluster"] == cluster
            assert actual["resource_agent_start_resources"] == start_resources


@pytest.mark.parametrize("version, expected", [("5.1.0", False), ("7.3.0", True)])
def test_features_efi(version, expected):
    """
    The 'uefi_auto_loader' feature flag must track the detected libvirtd
    version reported by the mocked subprocess call.
    """
    banner = "libvirtd (libvirt) {}\n".format(version).encode()
    popen_mock = MagicMock()
    popen_mock.return_value.communicate.return_value = (banner, None)

    with patch.object(virt.subprocess, "Popen", popen_mock):
        features = virt.features()["virt_features"]
        assert features["uefi_auto_loader"] == expected
 070701000000E7000081B400000000000000000000000163F87E30000005F6000000000000000000000000000000000000003200000000susemanager-sls/src/tests/test_mgr_master_tops.py # -*- coding: utf-8 -*-
'''
:codeauthor:    Pablo Suárez Hernández <psuarezhernandez@suse.de>
'''

from mock import MagicMock, patch
from . import mockery
mockery.setup_environment()

import sys
sys.path.append("../../modules/tops")

import mgr_master_tops

# Static top state that mgr_master_tops is expected to return for the
# "base" salt environment (compared against in the tests below).
TEST_MANAGER_STATIC_TOP = {
    "base": [
        "channels",
        "certs",
        "packages",
        "custom",
        "custom_groups",
        "custom_org",
        "formulas",
        "services.salt-minion",
        "services.docker",
        "services.kiwi-image-server",
        "ansible"
    ]
}


def test_virtual():
    '''
    The __virtual__ entry point must expose the module under its own name.
    '''
    result = mgr_master_tops.__virtual__()
    assert result == "mgr_master_tops"


def test_top_default_saltenv():
    '''
    When no environment is specified, top() must fall back to the static
    SUSE Manager top state for the base environment.
    '''
    opts = {'opts': {'environment': None}}
    assert mgr_master_tops.top(**opts) == TEST_MANAGER_STATIC_TOP


def test_top_base_saltenv():
    '''
    With the environment explicitly set to "base", top() must return the
    static SUSE Manager top state.
    '''
    opts = {'opts': {'environment': 'base'}}
    assert mgr_master_tops.top(**opts) == TEST_MANAGER_STATIC_TOP


def test_top_unknown_saltenv():
    '''
    Test if top function is returning None for unknown salt environments.
    '''
    kwargs = {'opts': {'environment': 'otherenv'}}
    # Identity check: "== None" is unidiomatic (PEP 8) and can be fooled
    # by a custom __eq__; "is None" is the correct test.
    assert mgr_master_tops.top(**kwargs) is None
  070701000000E8000081B400000000000000000000000163F87E30000004A1000000000000000000000000000000000000003A00000000susemanager-sls/src/tests/test_module_mainframesysinfo.py '''
Author: Bo Maryniuk <bo@suse.de>
'''

import pytest
from mock import MagicMock, patch
from . import mockery
mockery.setup_environment()

from ..modules import mainframesysinfo


def test_virtual():
    '''
    __virtual__ must mirror the result of the os.access probe:
    True when the probe succeeds, False otherwise.

    :return:
    '''
    for accessible in (True, False):
        with patch('os.access', MagicMock(return_value=accessible)):
            assert mainframesysinfo.__virtual__() is accessible


def test_read_values():
    '''
    read_values must return stdout on success (retcode 0) and raise an
    exception carrying stderr on failure.

    :return:
    '''
    payload = "bogus data"
    success = {'stdout': payload, 'retcode': 0, 'stderr': ''}
    with patch.dict(mainframesysinfo.__salt__, {'cmd.run_all': MagicMock(return_value=success)}):
        assert mainframesysinfo.read_values() == payload

    failure = dict(success, retcode=1, stderr='error here')
    with patch.dict(mainframesysinfo.__salt__, {'cmd.run_all': MagicMock(return_value=failure)}):
        with pytest.raises(Exception) as exc_info:
            mainframesysinfo.read_values()
        assert str(exc_info.value) == failure['stderr']
   070701000000E9000081B400000000000000000000000163F87E3000000E50000000000000000000000000000000000000003000000000susemanager-sls/src/tests/test_module_mgrnet.py   import sys

from unittest.mock import MagicMock, patch
from . import mockery

mockery.setup_environment()

from ..modules import mgrnet


mgrnet.__salt__ = {}
mgrnet.__utils__ = {}


def test_mgrnet_virtual():
    """
    Test __virtual__ function for the possible cases
    when either 'host' or 'nslookup' is available or none of them
    """
    lookup_results = MagicMock(side_effect=[True, False, True, False, False])
    with patch.dict(mgrnet.__utils__, {"path.which": lookup_results}):
        # First probe succeeds straight away
        assert mgrnet.__virtual__() is True

        # First probe fails, the fallback probe succeeds
        assert mgrnet.__virtual__() is True

        # No tool found at all: the result's first element is False
        failed = mgrnet.__virtual__()
        assert failed[0] is False


def test_mgrnet_dns_fqdns():
    """
    Test getting possible FQDNs with DNS tools
    """

    check_calls = {"host": [], "nslookup": []}

    ipv4_addresses = ["10.0.0.1", "172.16.0.1", "192.168.0.1", "10.10.1.1"]
    ipv6_addresses = ["fd12:3456:789a:1::1", "fd12:abcd:1234:1::1"]

    # Reverse-lookup fixture; two IPv4 addresses resolve to the same name,
    # so the expected result set below is deduplicated.
    names = {
        "10.0.0.1": "host10.example.org",
        "172.16.0.1": "host172.example.org",
        "192.168.0.1": "host10.example.org",
        "fd12:3456:789a:1::1": "ipv6host3456.example.org",
        "fd12:abcd:1234:1::1": "ipv6hostabcd.example.org",
    }

    def _cmd_run_host_nslookup(cmd, ignore_retcode=False):
        """
        This function is emulating the output of 'host' or 'nslookup'
        """
        ip = cmd[1]
        cmd = cmd[0]
        check_calls[cmd].append(ip)
        rc = 0
        if ":" in ip:
            # the conversion is not very accurate here, but it's enough for testing
            ptr = "{}.ip6.arpa".format(".".join(reversed([*ip.replace(":", "")])))
        else:
            # Fix: split on dots -- ip.split() splits on whitespace, which
            # keeps the whole address as one token and yields a
            # non-reversed in-addr.arpa name.
            ptr = "{}.in-addr.arpa".format(".".join(reversed(ip.split("."))))
        if cmd == "host":
            if ip in names:
                out = "{} domain name pointer {}.\n".format(ptr, names[ip])
            else:
                out = "Host {}. not found: 3(NXDOMAIN)\n".format(ptr)
                rc = 1
        else:
            if ip in names:
                out = "{}\tname = {}.\n".format(ptr, names[ip])
            else:
                out = "** server can't find {}: NXDOMAIN\n".format(ptr)
                rc = 1
        return {"retcode": rc, "stdout": out}

    with patch.dict(
        mgrnet.__salt__, {"cmd.run_all": _cmd_run_host_nslookup}
    ), patch.dict(
        mgrnet.__utils__,
        {"path.which": MagicMock(side_effect=[True, False, True, False, False])},
    ), patch.object(
        mgrnet.salt.utils.network,
        "ip_addrs",
        MagicMock(side_effect=[ipv4_addresses.copy(), ipv4_addresses.copy()]),
    ), patch.object(
        mgrnet.salt.utils.network,
        "ip_addrs6",
        MagicMock(side_effect=[ipv6_addresses.copy(), ipv6_addresses.copy()]),
    ):
        # Test 'host' util output
        ret = mgrnet.dns_fqdns()
        assert sorted(ret["dns_fqdns"]) == sorted(set(names.values()))

        # Test 'nslookup' util output
        ret = mgrnet.dns_fqdns()
        assert sorted(ret["dns_fqdns"]) == sorted(set(names.values()))

        # Check if 'host' and 'nslookup' were called for all of IPv4 and IPv6 addresses
        for ip in ipv4_addresses:
            assert ip in check_calls["host"]
            assert ip in check_calls["nslookup"]
        for ip in ipv6_addresses:
            assert ip in check_calls["host"]
            assert ip in check_calls["nslookup"]

        assert len(check_calls["host"]) == len(ipv4_addresses) + len(ipv6_addresses)

        # Test the case when neither 'host' nor 'nslookup' is present on the system
        ret = mgrnet.dns_fqdns()
        assert ret == {"dns_fqdns": []}
070701000000EA000081B400000000000000000000000163F87E3000000608000000000000000000000000000000000000003200000000susemanager-sls/src/tests/test_module_sumautil.py '''
Author: mc@suse.com
'''

from mock import MagicMock, patch
from . import mockery
mockery.setup_environment()

from ..modules import sumautil


def test_livepatching_kernelliveversion():
    '''
    Test kernel_live_version.

    :return:
    '''

    sumautil.log = MagicMock()
    with patch('src.modules.udevdb._which_bin', MagicMock(return_value="/bogus/path")):
        # Each sample file maps to the live-patch name expected from it;
        # the first cmd.run_all call reports 'ready', the second returns
        # the sample output. (Stray semicolons and the copy-pasted second
        # block were cleaned up.)
        for sample, expected in [('livepatching-1.sample', 'kgraft_patch_1_2_2'),
                                 ('livepatching-2.sample', 'kgraft_patch_2_2_1')]:
            mock = MagicMock(side_effect=[
                {'retcode': 0, 'stdout': 'ready'},
                {'retcode': 0, 'stdout': mockery.get_test_data(sample)},
            ])
            with patch.dict(sumautil.__salt__, {'cmd.run_all': mock}):
                out = sumautil.get_kernel_live_version()
                # isinstance is the idiomatic type check (pylint: unidiomatic-typecheck)
                assert isinstance(out, dict)
                assert 'mgr_kernel_live_version' in out
                assert out['mgr_kernel_live_version'] == expected

    # When the required binary cannot be located, nothing is reported.
    with patch('src.modules.udevdb._which_bin', MagicMock(return_value=None)):
        out = sumautil.get_kernel_live_version()
        assert out is None
070701000000EB000081B400000000000000000000000163F87E3000000F03000000000000000000000000000000000000003000000000susemanager-sls/src/tests/test_module_udevdb.py   '''
Author: Bo Maryniuk <bo@suse.de>
'''

from mock import MagicMock, patch
from . import mockery
mockery.setup_environment()

from ..modules import udevdb


def test_virtual():
    '''
    Test virtual returns True if 'udevadm' is around in the environment.

    :return:
    '''
    for binary_path, expected in ((None, False), ("/bogus/path", True)):
        with patch('src.modules.udevdb._which_bin', MagicMock(return_value=binary_path)):
            assert udevdb.__virtual__() is expected


def test_normalize():
    '''
    normalize() must unwrap single-item lists while leaving longer lists
    and plain scalars untouched.

    :return:
    '''
    sample = {'key': ['value', 'here'], 'foo': ['bar'], 'some': 'data'}
    expected = {'foo': 'bar', 'some': 'data', 'key': ['value', 'here']}
    assert udevdb.normalize(sample) == expected


def test_exportdb():
    '''
    Test udevdb.exportdb method.

    :return:
    '''
    udev_data = mockery.get_test_data('udev.sample')
    # Expected parse of udev.sample: one dict per device stanza, with the
    # 'E' environment key/value pairs (numeric values parsed to int), the
    # optional 'N' node name, and the 'X-Mgr' extension for SCSI devices.
    out = [{'P': '/devices/LNXSYSTM:00/LNXPWRBN:00',
            'E': {'MODALIAS': 'acpi:LNXPWRBN:',
                  'SUBSYSTEM': 'acpi',
                  'DRIVER': 'button',
                  'DEVPATH': '/devices/LNXSYSTM:00/LNXPWRBN:00'}},
           {'P': '/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2',
            'E': {'SUBSYSTEM': 'input',
                  'PRODUCT': '19/0/1/0',
                  'PHYS': '"LNXPWRBN/button/input0"',
                  'NAME': '"Power Button"',
                  'ID_INPUT': 1,
                  'DEVPATH': '/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2',
                  'MODALIAS': 'input:b0019v0000p0001e0000-e0,1,k74,ramlsfw',
                  'ID_PATH_TAG': 'acpi-LNXPWRBN_00',
                  'TAGS': ':seat:',
                  'PROP': 0,
                  'ID_FOR_SEAT': 'input-acpi-LNXPWRBN_00',
                  'KEY': '10000000000000 0',
                  'USEC_INITIALIZED': 2010022,
                  'ID_PATH': 'acpi-LNXPWRBN:00',
                  'EV': 3,
                  'ID_INPUT_KEY': 1}},
           {'P': '/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2/event2',
            'E': {'SUBSYSTEM': 'input',
                  'XKBLAYOUT': 'us',
                  'MAJOR': 13,
                  'ID_INPUT': 1,
                  'DEVPATH': '/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2/event2',
                  'ID_PATH_TAG': 'acpi-LNXPWRBN_00',
                  'DEVNAME': '/dev/input/event2',
                  'TAGS': ':power-switch:',
                  'BACKSPACE': 'guess',
                  'MINOR': 66,
                  'USEC_INITIALIZED': 2076101,
                  'ID_PATH': 'acpi-LNXPWRBN:00',
                  'XKBMODEL': 'pc105',
                  'ID_INPUT_KEY': 1},
            'N': 'input/event2'},
           {'P': '/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0',
            'E': {'MODALIAS': 'scsi:t-0x00',
                  'SUBSYSTEM': 'scsi',
                  'DEVTYPE': 'scsi_device',
                  'DRIVER': 'sd',
                  'DEVPATH': '/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0'
                  },
            'X-Mgr': {'SCSI_SYS_TYPE': '0'}},
           ]

    # The first cmd.run_all call returns the udevadm export dump, the
    # second returns '0' (consumed for the SCSI_SYS_TYPE extension above) --
    # the side_effect order matters.
    with patch.dict(udevdb.__salt__, {'cmd.run_all': MagicMock(side_effect=[{'retcode': 0, 'stdout': udev_data},
                                                                            {'retcode': 0, 'stdout': '0'}])}):
        data = udevdb.exportdb()
        # No empty/falsy entries may appear in the export
        assert data == [_f for _f in data if _f]

        # Compare each exported section against the expected fixture above
        for d_idx, d_section in enumerate(data):
            assert out[d_idx]['P'] == d_section['P']
            assert out[d_idx].get('N') == d_section.get('N')
            assert out[d_idx].get('X-Mgr') == d_section.get('X-Mgr')
            for key, value in list(d_section['E'].items()):
                assert out[d_idx]['E'][key] == value
 070701000000EC000081B400000000000000000000000163F87E3000001ADD000000000000000000000000000000000000003600000000susemanager-sls/src/tests/test_module_uyuni_config.py '''
Author: Ricardo Mateus <rmateus@suse.com>
'''

import pytest
from mock import MagicMock, patch, call
from . import mockery

mockery.setup_environment()

import sys

from ..modules import uyuni_config
from ..modules.uyuni_config import RPCClient, UyuniChannelsException, UyuniUsersException

class TestRPCClient:
    """
    Test RPCClient object.

    RPCClient wraps an XML-RPC connection to the Uyuni server; each test
    replaces the connection with a MagicMock so no network I/O happens.
    """
    # Re-created for every test by setup_method.
    rpc_client = None

    @patch("src.modules.uyuni_config.ssl", MagicMock())
    @patch("src.modules.uyuni_config.xmlrpc", MagicMock())
    def setup_method(self, method):
        """
        Setup state per test.

        :param method: test method about to run (pytest hook argument)
        :return:
        """
        self.rpc_client = RPCClient(user="user", password="password", url="https://somewhere")
        # Replace the connection with a mock FIRST, then stub the login
        # call on the mock.  (The previous code stubbed auth.login before
        # swapping conn, so the stub landed on the discarded connection
        # object and had no effect.)
        self.rpc_client.conn = MagicMock()
        self.rpc_client.conn.auth.login = MagicMock(return_value="My_token")

    def teardown_method(self, method):
        """
        Tear-down state per test.

        :param method: test method that just ran (pytest hook argument)
        :return:
        """
        self.rpc_client = None
        # Reset pillar data so one test cannot leak configuration into the
        # next.  NOTE(review): uyuni_config.__context__ (used as a token
        # cache, see test_get_token) is not reset here — confirm the tests
        # do not depend on execution order.
        uyuni_config.__pillar__ = {}

    def test_init_called(self):
        """
        Init method called

        :return:
        """
        assert self.rpc_client.get_user() == 'user'
        assert self.rpc_client.token is None

    def test_init_called_without_pillar(self):
        """
        Init method called without user password and without any pillar data

        :return:
        """
        with pytest.raises(UyuniUsersException):
            RPCClient(user="user")

    def test_init_called_with_pillar(self):
        """
        Init method called without user password and with pillar data defined

        :return:
        """
        uyuni_config.__pillar__ = {
            "uyuni": {
                "xmlrpc": {
                    "user": "admin_user",
                    "password": "password_user"
                }
            }
        }

        rpc_client = RPCClient(user="user")
        # Pillar credentials win over the user passed to the constructor.
        assert rpc_client.get_user() == 'admin_user'
        assert rpc_client._user == 'admin_user'
        assert rpc_client._password == 'password_user'
        assert rpc_client.token is None

    def test_get_token(self):
        """
        Test get_token method with reuse token

        :return:
        """
        my_mock1 = MagicMock(return_value="My_Special_Token")
        my_mock2 = MagicMock(return_value="My_Special_Token_2")
        self.rpc_client.conn.auth.login = my_mock1
        token = self.rpc_client.get_token()

        # First call authenticates and caches the token in __context__.
        assert my_mock1.call_count == 1
        assert token == "My_Special_Token"
        assert uyuni_config.__context__.get("uyuni.auth_token_user") == "My_Special_Token"

        # Subsequent calls reuse the cached token (no new login).
        self.rpc_client.get_token()
        assert my_mock1.call_count == 1

        self.rpc_client.conn.auth.login = my_mock2
        self.rpc_client.get_token()
        assert my_mock1.call_count == 1
        assert my_mock2.call_count == 0

        # get_token(True) forces re-authentication and refreshes the cache.
        token = self.rpc_client.get_token(True)
        assert my_mock1.call_count == 1
        assert my_mock2.call_count == 1
        assert token == "My_Special_Token_2"
        assert uyuni_config.__context__.get("uyuni.auth_token_user") == "My_Special_Token_2"

    def test_call_rpc(self):
        """
        Call any XML-RPC method.

        :return:
        """
        self.rpc_client.token = "My_token"
        # Without extra arguments only the token is passed along.
        out = self.rpc_client("uyuni.some_method")
        mo = getattr(self.rpc_client.conn, "uyuni.some_method")
        assert out is not None
        assert mo.called
        mo.assert_called_with("My_token")

        # Extra positional arguments are forwarded after the token.
        out2 = self.rpc_client("uyuni.some_method_2", "my_arg")
        mo2 = getattr(self.rpc_client.conn, "uyuni.some_method_2")
        assert out2 is not None
        assert mo2.called
        mo2.assert_called_with("My_token", "my_arg")

    def test_call_rpc_crash_handle_generic(self):
        """
        Handle XML-RPC method crash with generic error

        :return:
        """
        self.rpc_client.token = "the_token"
        exc = Exception("generic error when processing")
        # A fault code other than 2950 must not trigger a re-login: the
        # exception is logged and re-raised after a single attempt.
        exc.faultCode = 2951
        setattr(self.rpc_client.conn, "uyuni.some_method",
                MagicMock(side_effect=exc))

        with patch("src.modules.uyuni_config.log") as logger:
            with pytest.raises(Exception):
                self.rpc_client("uyuni.some_method")
            mo = getattr(self.rpc_client.conn, "uyuni.some_method")
            assert mo.called
            mo.assert_called_with("the_token")
            assert logger.error.call_args[0] == ('Unable to call RPC function: %s', 'generic error when processing')

    def test_call_rpc_crash_handle_reauthenticate_error(self):
        """
        Handle XML-RPC method crash with reauthenticate error

        :return:
        """
        self.rpc_client.token = "the_token"
        self.rpc_client.conn.auth.login = MagicMock(return_value="the_token_new")

        exc = Exception("generic error when processing")
        # Fault code 2950 apparently signals an auth failure: a re-login
        # plus one retry is expected.  Here the retry fails as well.
        exc.faultCode = 2950
        setattr(self.rpc_client.conn, "uyuni.some_method",
                MagicMock(side_effect=exc))

        with patch("src.modules.uyuni_config.log") as logger:
            with pytest.raises(Exception):
                self.rpc_client("uyuni.some_method")
            mo = getattr(self.rpc_client.conn, "uyuni.some_method")
            assert mo.call_count == 2
            mo.assert_has_calls([call("the_token"), call("the_token_new")])
            self.rpc_client.conn.auth.login.assert_called_once_with("user", "password")
            assert self.rpc_client.get_token() == "the_token_new"
            assert logger.error.call_args[0] == ('Unable to call RPC function: %s', 'generic error when processing')

    def test_call_rpc_handle_reauthenticate(self):
        """
        Handle XML-RPC method and reauthenticate

        :return:
        """
        self.rpc_client.token = "the_token"
        self.rpc_client.conn.auth.login = MagicMock(return_value="the_token_new")

        exc = Exception("generic error when processing")
        exc.faultCode = 2950

        # First call raises the auth error, the retried call succeeds.
        setattr(self.rpc_client.conn, "uyuni.some_method",
                MagicMock(side_effect=[exc, "return string"]))

        assert self.rpc_client.get_token() == "the_token"
        with patch("src.modules.uyuni_config.log") as logger:
            out = self.rpc_client("uyuni.some_method")
            mo = getattr(self.rpc_client.conn, "uyuni.some_method")
            assert out is not None
            assert out == 'return string'
            assert mo.call_count == 2
            mo.assert_has_calls([call("the_token"), call("the_token_new")])
            self.rpc_client.conn.auth.login.assert_called_once_with("user", "password")
            assert self.rpc_client.get_token() == "the_token_new"
            assert logger.warning.call_args[0] == ('Fall back to the second try due to %s', 'generic error when processing')

   070701000000ED000081B400000000000000000000000163F87E3000002AC0000000000000000000000000000000000000003400000000susemanager-sls/src/tests/test_module_virt_utils.py   """
Unit tests for the virt_utils module
"""
from mock import Mock, MagicMock, patch, mock_open
from xml.etree import ElementTree
import pytest

from ..modules import virt_utils
from . import mockery

mockery.setup_environment()

# virt_utils resolves Salt execution modules through __salt__; start with
# an empty mapping so each test patches in exactly what it needs.
virt_utils.__salt__ = {}


# Pacemaker CIB configuration XML as returned by the mocked crm subprocess:
# a two-node cluster with an OCFS2 filesystem clone (c-clusterfs, mounted
# on /srv/clusterfs) and two VirtualDomain primitives (vm01, vm03) whose
# definitions live on that filesystem.
CRM_CONFIG_XML = b"""<?xml version="1.0" ?>
<cib>
  <configuration>
    <nodes>
      <node id="1084783225" uname="demo-kvm1"/>
      <node id="1084783226" uname="demo-kvm2"/>
    </nodes>
    <resources>
      <clone id="c-clusterfs">
        <meta_attributes id="c-clusterfs-meta_attributes">
          <nvpair name="interleave" value="true" id="c-clusterfs-meta_attributes-interleave"/>
          <nvpair name="clone-max" value="8" id="c-clusterfs-meta_attributes-clone-max"/>
          <nvpair id="c-clusterfs-meta_attributes-target-role" name="target-role" value="Started"/>
        </meta_attributes>
        <group id="ocfs2-group">
          <primitive id="clusterfs" class="ocf" provider="heartbeat" type="Filesystem">
            <instance_attributes id="clusterfs-instance_attributes">
              <nvpair name="directory" value="/srv/clusterfs" id="clusterfs-instance_attributes-directory"/>
              <nvpair name="fstype" value="ocfs2" id="clusterfs-instance_attributes-fstype"/>
              <nvpair name="device" value="/dev/vdc" id="clusterfs-instance_attributes-device"/>
            </instance_attributes>
          </primitive>
        </group>
      </clone>
      <primitive id="vm01" class="ocf" provider="heartbeat" type="VirtualDomain">
        <instance_attributes id="vm01-instance_attributes">
          <nvpair name="config" value="/srv/clusterfs/vm01.xml" id="vm01-instance_attributes-config"/>
        </instance_attributes>
      </primitive>
      <primitive id="vm03" class="ocf" provider="heartbeat" type="VirtualDomain">
        <instance_attributes id="vm03-instance_attributes">
          <nvpair name="config" value="/srv/clusterfs/vm03.xml" id="vm03-instance_attributes-config"/>
        </instance_attributes>
      </primitive>
    </resources>
  </configuration>
</cib>
"""

# `crm_mon -1 --output-as xml` style status output: vm01 is started on
# node demo-kvm1, vm03 is stopped.
CRM_MON_XML = b"""
<pacemaker-result api-version="2.2" request="crm_mon -1 --output-as xml">
  <resources>
    <resource id="vm03" resource_agent="ocf::heartbeat:VirtualDomain" role="Stopped" target_role="Stopped"
        active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false"
        nodes_running_on="0"/>
    <resource id="vm01" resource_agent="ocf::heartbeat:VirtualDomain" role="Started" target_role="Started"
        active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false"
        nodes_running_on="1">
      <node name="demo-kvm1" id="1084783225" cached="true"/>
    </resource>
  </resources>
</pacemaker-result>
"""

@pytest.fixture
def libvirt():
    """Yield virt_utils.libvirt, installing a Mock stand-in when the real binding is absent."""
    try:
        handle = virt_utils.libvirt
    except AttributeError:
        handle = virt_utils.libvirt = Mock()
    return handle

@pytest.mark.parametrize(
    "path,expected",
    [
        ("/srv/clusterfs/vms/", "c-clusterfs"),
        ("/srv/clusterfs", "c-clusterfs"),
        ("/foo/bar", None),
    ],
)
def test_get_cluster_filesystem(path, expected):
    """
    test the get_cluster_filesystem() function in normal cases
    """
    # Resolve symlinks to a fixed location and feed canned CIB XML to the
    # crm subprocess call.
    path_mock = MagicMock(wraps=virt_utils.Path)
    path_mock.return_value.resolve.return_value = virt_utils.Path(path)
    popen_mock = MagicMock()
    popen_mock.return_value.communicate.return_value = (CRM_CONFIG_XML, None)
    with patch.object(virt_utils, "Path", path_mock), patch.object(
        virt_utils.subprocess, "Popen", popen_mock
    ):
        assert virt_utils.get_cluster_filesystem(path) == expected


def test_get_cluster_filesystem_nocrm():
    """
    test the get_cluster_filesystem() function when crm is not installed

    Spawning the crm tool raises OSError; the function is expected to
    handle it and report no cluster filesystem.
    """
    with patch.object(virt_utils, "Path", MagicMock(wraps=virt_utils.Path)) as path_mock:
        path_mock.return_value.resolve.return_value = virt_utils.Path("/srv/clusterfs/xml")
        with patch.object(virt_utils.subprocess, "Popen", MagicMock()) as popen_mock:
            popen_mock.return_value.communicate.side_effect = OSError("No such file or directory: 'crm'")
            # `is None` instead of `== None` (PEP 8 / E711)
            assert virt_utils.get_cluster_filesystem("/srv/clusterfs/xml") is None


@pytest.mark.parametrize("no_graphics", [True, False])
def test_vm_info_no_cluster(no_graphics):
    """
    Test the vm_info() function for a VM which isn't in a cluster
    """
    if no_graphics:
        fake_vminfo = {}
    else:
        fake_vminfo = {"graphics": {"type": "spice"}}
    salt_mocks = {"virt.vm_info": MagicMock(return_value={"vm": fake_vminfo})}
    with patch.dict(virt_utils.__salt__, salt_mocks):
        result = virt_utils.vm_info("vm")
        expected_graphics = None if no_graphics else "spice"
        assert result["vm"].get("cluster_primitive") is None
        assert result["vm"].get("graphics_type") == expected_graphics


def test_vminfo_cluster():
    """
    Test the vm_info() function for VMs in a cluster
    """
    # Minimal libvirt domain XML; name and uuid are filled in per VM below.
    vm_xml_template = """<domain type='kvm'>
  <name>{}</name>
  <uuid>{}</uuid>
  <memory unit='KiB'>524288</memory>
  <currentMemory unit='KiB'>524288</currentMemory>
  <vcpu placement='static'>1</vcpu>
  <devices>
    <graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0'>
      <listen type='address' address='0.0.0.0'/>
    </graphics>
  </devices>
</domain>"""

    vms = [
        ("vm01", "15c09f1f-6ac7-43b5-83e9-96a63c40fb14"),
        ("vm03", "c4596ec0-4e0e-4a1d-aa43-88ba442d5085")
    ]
    vms_xml = [ElementTree.fromstring(vm_xml_template.format(vm[0], vm[1])) for vm in vms]

    # Only vm01 is known to virt.vm_info; vm03 data comes from the cluster.
    vminfo_mock = MagicMock(return_value={"vm01": {"graphics": {"type": "vnc"}}})

    # Order matters: vm_info() is expected to call crm_mon first, then the
    # CIB configuration query.
    popen_mock = MagicMock()
    popen_mock.return_value.communicate.side_effect = [(CRM_MON_XML, None), (CRM_CONFIG_XML, None)]

    with patch.dict(virt_utils.__salt__, {"virt.vm_info": vminfo_mock}):
        with patch.object(virt_utils.subprocess, "Popen", popen_mock):
            # ElementTree.parse returns the per-VM definitions in order.
            with patch.object(virt_utils.ElementTree, "parse", MagicMock(side_effect=vms_xml)):
                info = virt_utils.vm_info()
                assert info["vm01"].get("cluster_primitive") == "vm01"
                assert info["vm01"].get("graphics_type") =="vnc"
                assert info["vm01"].get("definition_path") == "/srv/clusterfs/vm01.xml"
                assert info["vm01"].get("vcpus") == 1
                assert info["vm01"]["uuid"] == "15c09f1f-6ac7-43b5-83e9-96a63c40fb14"
                assert info["vm03"].get("cluster_primitive") == "vm03"
                assert info["vm03"]["uuid"] == "c4596ec0-4e0e-4a1d-aa43-88ba442d5085"
                assert info["vm03"].get("definition_path") == "/srv/clusterfs/vm03.xml"
                # 524288 KiB expressed as MiB
                assert info["vm03"].get("memory") == 512
                assert info["vm03"].get("graphics_type") =="vnc"


def test_host_info():
    """
    Test the host_info() function
    """
    # CIB with three cluster nodes; the local node is demo-kvm1.
    crm_conf_node = b"""<?xml version="1.0" ?>
<cib>
  <configuration>
    <crm_config/>
    <nodes>
      <node id="1084783225" uname="demo-kvm1"/>
      <node id="1084783226" uname="demo-kvm2"/>
      <node id="1084783227" uname="demo-kvm3"/>
    </nodes>
    <resources/>
    <constraints/>
  </configuration>
</cib>"""
    # First subprocess call reports the local node name, second the CIB.
    popen_mock = MagicMock()
    popen_mock.return_value.communicate.side_effect = [(b"demo-kvm1", None), (crm_conf_node, None)]
    hypervisor_mock = MagicMock(return_value="kvm")
    with patch.object(virt_utils.subprocess, "Popen", popen_mock), patch.dict(
        virt_utils.__salt__, {"virt.get_hypervisor": hypervisor_mock}
    ):
        result = virt_utils.host_info()
    assert result["hypervisor"] == "kvm"
    assert result["cluster_other_nodes"] == ["demo-kvm2", "demo-kvm3"]


def test_vm_definition(libvirt):
    """
    test the vm_definition() function with a regular VM
    """
    vm_xml = """<domain type='kvm'>
  <name>vm01</name>
  <uuid>15c09f1f-6ac7-43b5-83e9-96a63c40fb14</uuid>
  <memory unit='KiB'>524288</memory>
  <currentMemory unit='KiB'>524288</currentMemory>
  <vcpu placement='static'>1</vcpu>
</domain>"""
    vm_info = {
        "uuid": "15c09f1f-6ac7-43b5-83e9-96a63c40fb14",
        "cpu": 1,
    }
    # The VM is known to libvirt: uuid lookup resolves to its name.
    conn_mock = MagicMock()
    conn_mock.return_value.lookupByUUIDString.return_value.name.return_value = "vm01"
    salt_mocks = {
        "virt.get_xml": MagicMock(return_value=vm_xml),
        "virt.vm_info": MagicMock(return_value={"vm01": vm_info}),
    }
    with patch.object(libvirt, "open", conn_mock):
        with patch.dict(virt_utils.__salt__, salt_mocks):
            actual = virt_utils.vm_definition("15c09f1f-6ac7-43b5-83e9-96a63c40fb14")
    assert actual["definition"] == vm_xml
    assert actual["info"] == vm_info


def test_vm_definition_cluster(libvirt):
    """
    test the vm_definition() function with a stopped VM defined on a cluster
    """
    vm_xml = """<domain type='kvm'>
  <name>vm01</name>
  <uuid>15c09f1f-6ac7-43b5-83e9-96a63c40fb14</uuid>
  <memory unit='KiB'>524288</memory>
  <currentMemory unit='KiB'>524288</currentMemory>
  <vcpu placement='static'>1</vcpu>
  <devices>
    <graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0'>
      <listen type='address' address='0.0.0.0'/>
    </graphics>
  </devices>
</domain>"""

    with patch.object(libvirt, "open", MagicMock()) as mock_conn:
        with patch.object(libvirt, "libvirtError", Exception) as mock_error:
            # The VM is unknown to libvirt (stopped): the uuid lookup raises,
            # so vm_definition falls back to the cluster definition file.
            mock_conn.return_value.lookupByUUIDString.side_effect = mock_error
            with patch.object(virt_utils.subprocess, "Popen", MagicMock()) as popen_mock:
                # The crm call yields the CIB that maps vm01 to its XML path.
                popen_mock.return_value.communicate.return_value = (CRM_CONFIG_XML, None)
                # Reading the definition file returns the domain XML above.
                with patch("builtins.open", mock_open(read_data=vm_xml)):
                    actual = virt_utils.vm_definition("15c09f1f-6ac7-43b5-83e9-96a63c40fb14")
                    assert actual["definition"] == vm_xml
                    # No runtime info is available for a stopped VM.
                    assert actual.get("info") is None


@pytest.mark.parametrize("has_virt_tuner", (True, False))
def test_virt_tuner_templates(has_virt_tuner):
    """
    Test the virt_tuner_templates() function
    """
    templates = ["template1", "template2"] if has_virt_tuner else []
    tuner_mock = MagicMock()
    tuner_mock.templates.keys.return_value = templates
    # Patch the module attribute instead of assigning it permanently so the
    # original value is restored and later tests are not polluted.
    with patch.object(virt_utils, "virt_tuner", tuner_mock if has_virt_tuner else None, create=True):
        assert virt_utils.virt_tuner_templates() == templates


@pytest.mark.parametrize("has_virt_tuner", (True, False))
def test_domain_parameters(has_virt_tuner):
    """
    Test the domain_parameters() function
    """
    template_params = {"cpu": 22, "mem": 512, "foo": "bar"} if has_virt_tuner else {}
    template_mock = MagicMock()
    template_mock.function.return_value = template_params
    tuner_mock = MagicMock()
    tuner_mock.templates = {"template1": template_mock}
    # Patch the module attribute instead of assigning it permanently so the
    # original value is restored and later tests are not polluted.
    with patch.object(virt_utils, "virt_tuner", tuner_mock if has_virt_tuner else None, create=True):
        # Without virt_tuner the passed-in cpu/mem values are used as-is.
        assert virt_utils.domain_parameters(1, 1234, "template1") == (
            template_params if has_virt_tuner else {"cpu": 1, "mem": 1234}
        )
070701000000EE000081B400000000000000000000000163F87E3000001AB3000000000000000000000000000000000000003200000000susemanager-sls/src/tests/test_state_mgrcompat.py #-*- coding: utf-8 -*-
'''
Test custom wrapper for "module.run" state module.

Author: Pablo Suárez Hernández <psuarezhernandez@suse.com>
'''

import pytest
from mock import MagicMock, patch

from . import mockery
mockery.setup_environment()

from ..states import mgrcompat

# Kwargs in the "superseded" module.run calling convention (function name
# maps to a list of arg dicts) — expected when use_superseded is active.
TAILORED_MODULE_RUN_KWARGS = {'service.running': [{'text': 'superseded', 'name': 'salt-minion'}, {"foo": "bar"}]}
# Kwargs in the legacy flat form that mgrcompat.module_run receives.
MGRCOMPAT_MODULE_RUN_KWARGS = {'name': 'service.running', 'text': 'superseded', 'm_name': 'salt-minion', 'kwargs': {'foo': 'bar'}}

# Stub the module globals that Salt normally injects at load time.
mgrcompat.log = MagicMock()
mgrcompat.OrderedDict = dict
mgrcompat.__opts__ = {}
mgrcompat.__grains__ = {}
mgrcompat.__states__ = {}


@pytest.mark.parametrize(
    "saltversioninfo,use_superseded,expected_kwargs",
    [
        # Phosphorus (3005): kwargs passed through untouched
        ([3005, None, None, None], False, MGRCOMPAT_MODULE_RUN_KWARGS),
        # Silicon (3004)
        ([3004, None, None, None], False, MGRCOMPAT_MODULE_RUN_KWARGS),
        ([3004, None, None, None], True, TAILORED_MODULE_RUN_KWARGS),
        # Aluminium (3003)
        ([3003, None, None, None], False, MGRCOMPAT_MODULE_RUN_KWARGS),
        ([3003, None, None, None], True, TAILORED_MODULE_RUN_KWARGS),
        # Magnesium (3002)
        ([3002, None, None, None], False, MGRCOMPAT_MODULE_RUN_KWARGS),
        ([3002, None, None, None], True, TAILORED_MODULE_RUN_KWARGS),
        # Sodium (3001)
        ([3001, None, None, None], False, MGRCOMPAT_MODULE_RUN_KWARGS),
        ([3001, None, None, None], True, TAILORED_MODULE_RUN_KWARGS),
        # Neon (3000)
        ([3000, None, None, None], False, MGRCOMPAT_MODULE_RUN_KWARGS),
        ([3000, None, None, None], True, TAILORED_MODULE_RUN_KWARGS),
        # 2019.2.0
        ([2019, 2, 0, 0], True, TAILORED_MODULE_RUN_KWARGS),
        ([2019, 2, 0, 0], False, MGRCOMPAT_MODULE_RUN_KWARGS),
        # 2016.11.4 (no "use_superseded" support)
        ([2016, 11, 4, 0], False, MGRCOMPAT_MODULE_RUN_KWARGS),
    ],
)
def test_module_run_kwargs_dispatch(saltversioninfo, use_superseded, expected_kwargs):
    """
    mgrcompat.module_run must forward its kwargs to the wrapped
    "module.run" state unchanged, except when the "use_superseded" option
    enables the new module.run syntax — then the kwargs are tailored to
    the superseded calling convention.

    Consolidates the sixteen previous copy-pasted per-release tests into
    one parametrized test; each (version, option) combination is kept.
    """
    run_mock = MagicMock(return_value={'changes': {'service.running': 'foobar'}})
    # patch.dict with an empty mapping is a no-op, matching the original
    # tests that did not patch __opts__ at all.
    opts = {'use_superseded': ['module.run']} if use_superseded else {}
    with patch.dict(
        mgrcompat.__grains__, {'saltversioninfo': saltversioninfo}
    ), patch.dict(
        mgrcompat.__opts__, opts
    ), patch.dict(mgrcompat.__states__, {'module.run': run_mock}):
        mgrcompat.module_run(**MGRCOMPAT_MODULE_RUN_KWARGS)
        run_mock.assert_called_once_with(**expected_kwargs)
 070701000000EF000081B400000000000000000000000163F87E30000003CA000000000000000000000000000000000000003100000000susemanager-sls/src/tests/test_state_mgrutils.py  #-*- coding: utf-8 -*-
'''
Test for mgrutils states
'''

from mock import MagicMock, patch
from . import mockery
mockery.setup_environment()

from ..states import mgrutils


mgrutils.__opts__ = {'test': False}
mgrutils.__grains__ = {}
mgrutils.__salt__ = {}
mgrutils.__states__ = {}


def test_cmd_dump():
    """
    Test cmd_dump()
    """
    managed_mock = MagicMock(return_value={"comment": "dummy"})
    run_mock = MagicMock(return_value="output content")
    patched_states = patch.dict(mgrutils.__states__, {"file.managed": managed_mock})
    patched_salt = patch.dict(mgrutils.__salt__, {"cmd.run": run_mock})
    with patched_states, patched_salt:
        result = mgrutils.cmd_dump("/path/to/out", "/bin/bar --out xml")
    # The command runs without a shell and its output lands in the file.
    run_mock.assert_called_once_with("/bin/bar --out xml", raise_err=True, python_shell=False)
    managed_mock.assert_called_once_with("/path/to/out", contents="output content")
    assert result["comment"] == "dummy"
    assert result["name"] == "/path/to/out"
  070701000000F0000081B400000000000000000000000163F87E30000010C4000000000000000000000000000000000000003000000000susemanager-sls/src/tests/test_state_product.py   '''
Author: cbbayburt@suse.com
'''

import sys
from mock import MagicMock, patch, call
from . import mockery
mockery.setup_environment()

from ..states import product

# Mock globals
product.log = MagicMock()
product.__salt__ = {}
product.__grains__ = {}

@patch.dict(product.__grains__, {'os_family': 'Suse'})
def test_suse_with_zypper():
    '''
    Test if the state module is available for SUSE OS only with a
    supported version of zypper (>= 1.8.13) available.
    '''
    # Supported zypper version: __virtual__ returns the module name.
    with patch.dict(product.__salt__, {'pkg.info_installed': MagicMock(return_value={'zypper': {'version': '1.9.0'}})}):
        with patch.object(product, 'version_cmp', MagicMock(return_value=1)):
            # '==' instead of 'is': identity comparison against a str
            # literal depends on CPython interning and raises a
            # SyntaxWarning on Python >= 3.8.
            assert product.__virtual__() == 'product'
            product.version_cmp.assert_called_once_with('1.9.0', '1.8.13')

    # Unsupported zypper version
    with patch.dict(product.__salt__, {'pkg.info_installed': MagicMock(return_value={'zypper': {'version': '1.8.0'}})}):
        with patch.object(product, 'version_cmp', MagicMock(return_value=-1)):
            assert product.__virtual__() == (False, "Module product: zypper 1.8.13 or greater required")
            product.version_cmp.assert_called_once_with('1.8.0', '1.8.13')

    # No zypper available
    with patch.dict(product.__salt__, {'pkg.info_installed': MagicMock(return_value=sys.modules['salt.exceptions'].CommandExecutionError)}):
        assert product.__virtual__() == (False, "Module product: zypper package manager not found")


@patch.dict(product.__grains__, {'os_family': 'Non-Suse'})
def test_non_suse():
    '''
    Test if the state module is unavailable for Non-SUSE OS
    '''
    expected = (False, "Module product: non SUSE OS not supported")
    assert product.__virtual__() == expected


def test_get_missing_products():
    '''
    Test if the missing products are returned correctly, excluding
    the ones that are provided by another installed product.
    '''
    # pkg.search answers, in call order:
    pkg_search_mock = MagicMock(side_effect=[
        # 1. products missing from the system
        {'product1': True, 'product2': True},
        # 2. providers of product1 (another package also provides it)
        {'product1': True, 'this-provides-product1': True},
        # 3. providers of product2 (only the product itself)
        {'product2': True},
    ])

    with patch.dict(product.__salt__, {'pkg.search': pkg_search_mock}):
        missing = product._get_missing_products(False)

    # Expected pkg.search calls
    expected_calls = [
        call('product()', refresh=False, match='exact', provides=True, not_installed_only=True),
        call('product1', match='exact', provides=True),
        call('product2', match='exact', provides=True),
    ]
    pkg_search_mock.assert_has_calls(expected_calls)
    assert pkg_search_mock.call_count == 3
    # Only the non-provided product is reported as missing.
    assert missing == ['product2']


def test_not_installed_provides():
    '''
    Test if the provided packages are correctly excluded when
    provided by another missing product.
    '''
    # pkg.search answers keyed by the query they simulate, consumed in the
    # side_effect order below.
    test_data = {
        'not_installed': {'product1': True, 'this-provides-product1': True},
        'provides-product1': {'product1': True, 'this-provides-product1': True},
        'provides-product2': {'this-provides-product1': True}
    }

    pkg_search_mock = MagicMock(side_effect=[
        test_data['not_installed'],
        test_data['provides-product1'],
        test_data['provides-product2']])

    with patch.dict(product.__salt__, {'pkg.search': pkg_search_mock}):
        res = product._get_missing_products(False)

        # Expected pkg.search calls
        calls = [
            call('product()', refresh=False, match='exact', provides=True, not_installed_only=True),
            call('product1', match='exact', provides=True),
            call('this-provides-product1', match='exact', provides=True)
        ]

        pkg_search_mock.assert_has_calls(calls)
        assert pkg_search_mock.call_count == 3
        # Assert that not both products are returned
        assert len(res) == 1
        # Assert that the provided product is not returned
        assert 'product1' not in res
        # Assert that the providing product is returned
        assert 'this-provides-product1' in res
070701000000F1000081B400000000000000000000000163F87E3000017185000000000000000000000000000000000000003500000000susemanager-sls/src/tests/test_state_uyuni_config.py  import pytest
from mock import MagicMock, patch, call
from . import mockery
import pdb

mockery.setup_environment()

import sys

from ..states import uyuni_config

# Mock globals
uyuni_config.log = MagicMock()
uyuni_config.__salt__ = {}
uyuni_config.__opts__ = {'test': False}


class TestManageUser:
    """Tests for the uyuni_config.user_present / user_absent state functions.

    The uyuni.* execution-module functions are replaced by MagicMocks via
    patch.dict on uyuni_config.__salt__; faultCode 2951 on the mocked
    user_get_details exception signals "user not found", 2950 signals an
    authentication/credentials error, -213 "user does not exist".
    """

    def test_user_present_new_user_test(self):
        """test=True with a missing user: report the would-be creation
        (result None, all fields listed as 'new') without mutating anything."""
        # faultCode 2951 makes user_present treat the user as absent.
        exc = Exception("user not found")
        exc.faultCode = 2951

        with patch.dict(uyuni_config.__salt__, {'uyuni.user_get_details': MagicMock(side_effect=exc)}):
            with patch.dict(uyuni_config.__opts__, {'test': True}):
                result = uyuni_config.user_present('username', 'password', 'mail@mail.com',
                                                  'first_name', 'last_name', False,
                                                  ['role'], ['group'],
                                                  'org_admin_user', 'org_admin_password')
                assert result is not None
                assert result['name'] == 'username'
                assert result['result'] is None
                assert result['comment'] == 'username would be modified'

                # Every field is 'new'; the password must be masked.
                assert result['changes'] == {
                    'login': {'new': 'username'},
                    'password': {'new': '(hidden)'},
                    'email': {'new': 'mail@mail.com'},
                    'first_name': {'new': 'first_name'},
                    'last_name': {'new': 'last_name'},
                    'roles': {'new': ['role']},
                    'system_groups': {'new': ['group']}}

                uyuni_config.__salt__['uyuni.user_get_details'].assert_called_once_with('username',
                                                                                        org_admin_user='org_admin_user',
                                                                                        org_admin_password='org_admin_password')

    def test_user_present_new_user_minimal(self):
        """Creating a user with only the mandatory arguments: user_create is
        called with defaults (no PAM, no org admin credentials)."""
        exc = Exception("user not found")
        exc.faultCode = 2951

        with patch.dict(uyuni_config.__salt__, {
            'uyuni.user_get_details': MagicMock(side_effect=exc),
            'uyuni.user_create': MagicMock(return_value=True)}):
            result = uyuni_config.user_present('username', 'password', 'mail@mail.com',
                                              'first_name', 'last_name')
            assert result is not None
            assert result['name'] == 'username'
            assert result['result'] is True
            assert result['comment'] == 'username user successfully modified'

            # No roles/groups were requested, so no such keys in changes.
            assert result['changes'] == {
                'login': {'new': 'username'},
                'password': {'new': '(hidden)'},
                'email': {'new': 'mail@mail.com'},
                'first_name': {'new': 'first_name'},
                'last_name': {'new': 'last_name'}}

            # Verify the expected execution-module calls.
            uyuni_config.__salt__['uyuni.user_get_details'].assert_called_once_with('username',
                                                                                    org_admin_user=None,
                                                                                    org_admin_password=None)

            uyuni_config.__salt__['uyuni.user_create'].assert_called_once_with(email='mail@mail.com',
                                                                               first_name='first_name',
                                                                               last_name='last_name',
                                                                               use_pam_auth=False,
                                                                               org_admin_password=None,
                                                                               org_admin_user=None,
                                                                               password='password',
                                                                               login='username')

    def test_user_present_new_user_complete(self):
        """Creating a user with roles and system groups: user_create plus
        one user_add_role and one user_add_assigned_system_groups call."""
        exc = Exception("user not found")
        exc.faultCode = 2951

        with patch.dict(uyuni_config.__salt__, {
            'uyuni.user_get_details': MagicMock(side_effect=exc),
            'uyuni.user_create': MagicMock(return_value=True),
            'uyuni.user_add_role': MagicMock(return_value=True),
            'uyuni.user_add_assigned_system_groups': MagicMock(return_value=1)}):
            result = uyuni_config.user_present('username', 'password', 'mail@mail.com',
                                              'first_name', 'last_name', False,
                                               ['role'], ['group'],
                                              'org_admin_user', 'org_admin_password')
            assert result is not None
            assert result['name'] == 'username'
            assert result['result'] is True
            assert result['comment'] == 'username user successfully modified'

            assert result['changes'] == {
                'login': {'new': 'username'},
                'password': {'new': '(hidden)'},
                'email': {'new': 'mail@mail.com'},
                'first_name': {'new': 'first_name'},
                'last_name': {'new': 'last_name'},
                'roles': {'new': ['role']},
                'system_groups': {'new': ['group']}}

            # Verify the expected execution-module calls.
            uyuni_config.__salt__['uyuni.user_get_details'].assert_called_once_with('username',
                                                                                    org_admin_user='org_admin_user',
                                                                                    org_admin_password='org_admin_password')

            uyuni_config.__salt__['uyuni.user_create'].assert_called_once_with(email='mail@mail.com',
                                                                               first_name='first_name',
                                                                               last_name='last_name',
                                                                               use_pam_auth=False,
                                                                               org_admin_password='org_admin_password',
                                                                               org_admin_user='org_admin_user',
                                                                               password='password',
                                                                               login='username')

            uyuni_config.__salt__['uyuni.user_add_role'].assert_called_once_with('username', role='role',
                                                                                 org_admin_user='org_admin_user',
                                                                                 org_admin_password='org_admin_password')

            uyuni_config.__salt__['uyuni.user_add_assigned_system_groups'].assert_called_once_with(login='username',
                                                                                                   server_group_names=[
                                                                                                      'group'],
                                                                                                   org_admin_user='org_admin_user',
                                                                                                   org_admin_password='org_admin_password')

    def test_user_present_update_user(self):
        """Updating an existing user: changed fields are diffed old/new,
        role2/group1 are removed, role3/group3 are added, role1/group2 kept.

        user_get_details is called twice: first with org admin credentials
        (user exists), then with the new password — the 2950 faultCode on
        that second call tells user_present the password differs and must
        be updated.
        """
        exc = Exception("user not found")
        exc.faultCode = 2950

        current_user = {'uui': 'username',
                        'email': 'mail@mail.com',
                        'first_name': 'first',
                        'last_name': 'last'}

        with patch.dict(uyuni_config.__salt__, {
            'uyuni.user_get_details': MagicMock(side_effect=[current_user, exc]),
            'uyuni.user_list_roles': MagicMock(return_value=['role1', 'role2']),
            'uyuni.user_list_assigned_system_groups': MagicMock(return_value=[{'name': 'group1'}, {'name': 'group2'}]),
            'uyuni.user_set_details': MagicMock(return_value=True),
            'uyuni.user_remove_role': MagicMock(return_value=True),
            'uyuni.user_add_role': MagicMock(return_value=True),
            'uyuni.user_remove_assigned_system_groups': MagicMock(return_value=1),
            'uyuni.user_add_assigned_system_groups': MagicMock(return_value=1)}):
            result = uyuni_config.user_present('username', 'new_password', 'new_mail@mail.com',
                                              'new_first', 'new_last', False,
                                               ['role1', 'role3'], ['group2', 'group3'],
                                              'org_admin_user', 'org_admin_password')
            assert result is not None
            assert result['name'] == 'username'
            assert result['result'] is True
            assert result['comment'] == 'username user successfully modified'
            assert result['changes'] == {
                'password': {'new': '(hidden)', 'old': '(hidden)'},
                'email': {'new': 'new_mail@mail.com', 'old': 'mail@mail.com'},
                'first_name': {'new': 'new_first', 'old': 'first'},
                'last_name': {'new': 'new_last', 'old': 'last'},
                'roles': {'new': ['role1', 'role3'], 'old': ['role1', 'role2']},
                'system_groups': {'new': ['group2', 'group3'], 'old': ['group1', 'group2']}}

            # Verify the expected execution-module calls.
            uyuni_config.__salt__['uyuni.user_get_details'].assert_has_calls([call('username',
                                                                                   org_admin_user='org_admin_user',
                                                                                   org_admin_password='org_admin_password'),
                                                                              call('username', 'new_password')])

            uyuni_config.__salt__['uyuni.user_list_roles'].assert_called_once_with('username',
                                                                                   org_admin_user='org_admin_user',
                                                                                   org_admin_password='org_admin_password')

            uyuni_config.__salt__['uyuni.user_list_assigned_system_groups'].assert_called_once_with('username',
                                                                                                    org_admin_user='org_admin_user',
                                                                                                    org_admin_password='org_admin_password')

            uyuni_config.__salt__['uyuni.user_set_details'].assert_called_once_with(email='new_mail@mail.com',
                                                                                    first_name='new_first',
                                                                                    last_name='new_last',
                                                                                    org_admin_password='org_admin_password',
                                                                                    org_admin_user='org_admin_user',
                                                                                    password='new_password',
                                                                                    login='username')

            # Only the role/group differences are applied, one call each.
            uyuni_config.__salt__['uyuni.user_remove_role'].assert_called_once_with('username', role='role2',
                                                                                    org_admin_user='org_admin_user',
                                                                                    org_admin_password='org_admin_password')
            uyuni_config.__salt__['uyuni.user_add_role'].assert_called_once_with('username', role='role3',
                                                                                 org_admin_user='org_admin_user',
                                                                                 org_admin_password='org_admin_password')

            uyuni_config.__salt__['uyuni.user_remove_assigned_system_groups'].assert_called_once_with(login='username',
                                                                                                      server_group_names=[
                                                                                                         'group1'],
                                                                                                      org_admin_user='org_admin_user',
                                                                                                      org_admin_password='org_admin_password')
            uyuni_config.__salt__['uyuni.user_add_assigned_system_groups'].assert_called_once_with(login='username',
                                                                                                   server_group_names=[
                                                                                                      'group3'],
                                                                                                   org_admin_user='org_admin_user',
                                                                                                   org_admin_password='org_admin_password')

    def test_user_absent_auth_error(self):
        """faultCode 2950 (credentials error) on lookup: user_absent fails
        with an explanatory comment and no changes."""
        exc = Exception("Auth error")
        exc.faultCode = 2950

        with patch.dict(uyuni_config.__salt__, {'uyuni.user_get_details': MagicMock(side_effect=exc)}):
            result = uyuni_config.user_absent('username',
                                             'org_admin_user', 'org_admin_password')
            assert result is not None
            assert result['name'] == 'username'
            assert result['result'] is False
            assert result['comment'] == "Error deleting user (organization credentials error) 'username': Auth error"
            assert result['changes'] == {}

    def test_user_absent_user_not_exits(self):
        """faultCode -213 (user does not exist): user_absent succeeds as a
        no-op since the desired state already holds."""
        exc = Exception("User not found")
        exc.faultCode = -213

        with patch.dict(uyuni_config.__salt__, {'uyuni.user_get_details': MagicMock(side_effect=exc)}):
            result = uyuni_config.user_absent('username',
                                             'org_admin_user', 'org_admin_password')
            assert result is not None
            assert result['name'] == 'username'
            assert result['result'] is True
            assert result['comment'] == "username is already absent"
            assert result['changes'] == {}

    def test_user_absent_generic_error(self):
        """An unrecognized faultCode must propagate out of user_absent
        unchanged rather than being swallowed."""
        exc = Exception("generic error")
        exc.faultCode = 2951

        with patch.dict(uyuni_config.__salt__, {'uyuni.user_get_details': MagicMock(side_effect=exc)}):
            with pytest.raises(Exception) as e:
                uyuni_config.user_absent('username',
                                        'org_admin_user', 'org_admin_password')
            assert e.value.faultCode == 2951
            assert e.value.args[0] == 'generic error'

    def test_user_absent_exists_test(self):
        """test=True with an existing user: report the would-be deletion
        (result None, current fields listed as 'old') without deleting."""
        current_user = {'uui': 'username',
                        'email': 'mail@mail.com',
                        'first_name': 'first',
                        'last_name': 'last'}

        with patch.dict(uyuni_config.__salt__, {'uyuni.user_get_details': MagicMock(return_value=current_user)}):
            with patch.dict(uyuni_config.__opts__, {'test': True}):
                result = uyuni_config.user_absent('username',
                                                 'org_admin_user', 'org_admin_password')

                assert result is not None
                assert result['name'] == 'username'
                assert result['result'] is None
                assert result['comment'] == 'username would be deleted'

                assert result['changes'] == {
                    'login': {'old': 'username'},
                    'email': {'old': 'mail@mail.com'},
                    'first_name': {'old': 'first'},
                    'last_name': {'old': 'last'}}

                uyuni_config.__salt__['uyuni.user_get_details'].assert_called_once_with('username',
                                                                                        org_admin_user='org_admin_user',
                                                                                        org_admin_password='org_admin_password')

    def test_user_absent_exist_user(self):
        """Existing user, real run: user_delete is called and the removed
        fields are reported as 'old' changes."""
        current_user = {'uui': 'username',
                        'email': 'mail@mail.com',
                        'first_name': 'first',
                        'last_name': 'last'}

        with patch.dict(uyuni_config.__salt__, {
            'uyuni.user_get_details': MagicMock(return_value=current_user),
            'uyuni.user_delete': MagicMock(return_value=True)}):
            result = uyuni_config.user_absent('username',
                                             'org_admin_user', 'org_admin_password')

            assert result is not None
            assert result['name'] == 'username'
            assert result['result'] is True
            assert result['comment'] == 'User username has been deleted'

            assert result['changes'] == {
                'login': {'old': 'username'},
                'email': {'old': 'mail@mail.com'},
                'first_name': {'old': 'first'},
                'last_name': {'old': 'last'}}

            uyuni_config.__salt__['uyuni.user_get_details'].assert_called_once_with('username',
                                                                                    org_admin_user='org_admin_user',
                                                                                    org_admin_password='org_admin_password')
            uyuni_config.__salt__['uyuni.user_delete'].assert_called_once_with('username',
                                                                               org_admin_user='org_admin_user',
                                                                               org_admin_password='org_admin_password')


class TestManageUserChannels:
    """Tests for the uyuni_config.user_channels state function.

    Channel assignments cannot be managed for users holding the org_admin
    or channel_admin role (those roles implicitly grant channel access),
    so the state must refuse to act for them.
    """

    def test_user_channels_org_admin(self):
        """A user with the org_admin role is rejected with no changes."""
        with patch.dict(uyuni_config.__salt__, {
            # Fixed: this test exercises the org_admin role; the mock
            # previously returned ["channel_admin"] (copy-paste from the
            # channel_admin test below), contradicting the assertion that
            # the comment mentions 'org_admin'.
            'uyuni.user_list_roles': MagicMock(return_value=["org_admin"]),
            'uyuni.channel_list_manageable_channels': MagicMock(),
            'uyuni.channel_list_my_channels': MagicMock()}):
            result = uyuni_config.user_channels('username', 'password',
                                                org_admin_user='org_admin_user',
                                                org_admin_password='org_admin_password')
            assert result is not None
            assert result['name'] == 'username'
            assert not result['result']
            assert result['changes'] == {}
            assert 'org_admin' in result['comment']

            uyuni_config.__salt__['uyuni.user_list_roles'].assert_called_once_with('username', password='password')

            uyuni_config.__salt__['uyuni.channel_list_manageable_channels'].assert_called_once_with('username',
                                                                                                   'password')

            uyuni_config.__salt__['uyuni.channel_list_my_channels'].assert_called_once_with('username',
                                                                                           'password')

    def test_user_channels_channel_admin(self):
        """A user with the channel_admin role is rejected with no changes."""
        with patch.dict(uyuni_config.__salt__, {
            'uyuni.user_list_roles': MagicMock(return_value=["channel_admin"]),
            'uyuni.channel_list_manageable_channels': MagicMock(),
            'uyuni.channel_list_my_channels': MagicMock()}):
            result = uyuni_config.user_channels('username', 'password',
                                                org_admin_user='org_admin_user',
                                                org_admin_password='org_admin_password')
            assert result is not None
            assert result['name'] == 'username'
            assert not result['result']
            assert result['changes'] == {}
            assert 'channel_admin' in result['comment']

            uyuni_config.__salt__['uyuni.user_list_roles'].assert_called_once_with('username', password='password')

            uyuni_config.__salt__['uyuni.channel_list_manageable_channels'].assert_called_once_with('username',
                                                                                                   'password')

            uyuni_config.__salt__['uyuni.channel_list_my_channels'].assert_called_once_with('username',
                                                                                           'password')

    def test_user_channels_add_all(self):
        """Regular user with no current channels: both the requested
        manageable and subscribable channels are granted (True)."""
        with patch.dict(uyuni_config.__salt__, {
            'uyuni.user_list_roles': MagicMock(return_value=[]),
            'uyuni.channel_list_manageable_channels': MagicMock(return_value=[]),
            'uyuni.channel_list_my_channels': MagicMock(return_value=[]),
            'uyuni.channel_software_set_user_manageable': MagicMock(),
            'uyuni.channel_software_set_user_subscribable': MagicMock()}):
            result = uyuni_config.user_channels('username', 'password',
                                                manageable_channels=['manage1'],
                                                subscribable_channels=['subscribe1'],
                                                org_admin_user='org_admin_user',
                                                org_admin_password='org_admin_password')
            assert result is not None
            assert result['name'] == 'username'
            assert result['result']
            assert result['changes'] == {'manageable_channels': {'manage1': True},
                                         'subscribable_channels': {'subscribe1': True}}

            uyuni_config.__salt__['uyuni.user_list_roles'].assert_called_once_with('username', password='password')

            uyuni_config.__salt__['uyuni.channel_list_manageable_channels'].assert_called_once_with('username',
                                                                                                   'password')

            uyuni_config.__salt__['uyuni.channel_list_my_channels'].assert_called_once_with('username',
                                                                                           'password')

            uyuni_config.__salt__['uyuni.channel_software_set_user_manageable'].assert_called_once_with('manage1',
                                                                                                       'username',
                                                                                                        True,
                                                                                                       'org_admin_user',
                                                                                                       'org_admin_password')

            uyuni_config.__salt__['uyuni.channel_software_set_user_subscribable'].assert_called_once_with('subscribe1',
                                                                                                         'username',
                                                                                                          True,
                                                                                                         'org_admin_user',
                                                                                                         'org_admin_password')

    def test_user_channels_no_changes(self):
        """Requested channels already assigned: success with empty changes
        and no set_user_* calls."""
        with patch.dict(uyuni_config.__salt__, {
            'uyuni.user_list_roles': MagicMock(return_value=[]),
            'uyuni.channel_list_manageable_channels': MagicMock(return_value=[{"label": "manage1"}]),
            'uyuni.channel_list_my_channels': MagicMock(return_value=[{"label": "subscribe1"}]),
            'uyuni.channel_software_set_user_manageable': MagicMock(),
            'uyuni.channel_software_set_user_subscribable': MagicMock()}):
            result = uyuni_config.user_channels('username', 'password',
                                                manageable_channels=['manage1'],
                                                subscribable_channels=['subscribe1'],
                                                org_admin_user='org_admin_user',
                                                org_admin_password='org_admin_password')
            assert result is not None
            assert result['name'] == 'username'
            assert result['result']
            assert result['changes'] == {}

            uyuni_config.__salt__['uyuni.user_list_roles'].assert_called_once_with('username', password='password')

            uyuni_config.__salt__['uyuni.channel_list_manageable_channels'].assert_called_once_with('username',
                                                                                                   'password')

            uyuni_config.__salt__['uyuni.channel_list_my_channels'].assert_called_once_with('username',
                                                                                           'password')

    def test_user_channels_managed_subscribe_change(self):
        """Downgrade a managed channel to subscribe-only: manageable access
        is revoked (False) and subscribable access granted (True)."""
        with patch.dict(uyuni_config.__salt__, {
            'uyuni.user_list_roles': MagicMock(return_value=[]),
            'uyuni.channel_list_manageable_channels': MagicMock(return_value=[{"label": "manage1"}]),
            'uyuni.channel_list_my_channels': MagicMock(return_value=[{"label": "manage1"}]),
            'uyuni.channel_software_set_user_manageable': MagicMock(),
            'uyuni.channel_software_set_user_subscribable': MagicMock()}):
            result = uyuni_config.user_channels('username', 'password',
                                                manageable_channels=[],
                                                subscribable_channels=['manage1'],
                                                org_admin_user='org_admin_user',
                                                org_admin_password='org_admin_password')
            assert result is not None
            assert result['name'] == 'username'
            assert result['result']
            assert result['changes'] == {'manageable_channels': {'manage1': False},
                                         'subscribable_channels': {'manage1': True}}

            uyuni_config.__salt__['uyuni.user_list_roles'].assert_called_once_with('username', password='password')

            uyuni_config.__salt__['uyuni.channel_list_manageable_channels'].assert_called_once_with('username',
                                                                                                   'password')

            uyuni_config.__salt__['uyuni.channel_list_my_channels'].assert_called_once_with('username',
                                                                                           'password')

            uyuni_config.__salt__['uyuni.channel_software_set_user_manageable'].assert_called_once_with('manage1',
                                                                                                       'username',
                                                                                                        False,
                                                                                                       'org_admin_user',
                                                                                                       'org_admin_password')

            uyuni_config.__salt__['uyuni.channel_software_set_user_subscribable'].assert_called_once_with('manage1',
                                                                                                         'username',
                                                                                                          True,
                                                                                                         'org_admin_user',
                                                                                                         'org_admin_password')


class TestManageGroups:

    def test_group_present_new_group_test_no_systems(self):
        exc = Exception("Group not found")
        exc.faultCode = 2201

        with patch.dict(uyuni_config.__salt__, {
            'uyuni.systemgroup_get_details': MagicMock(side_effect=exc),
            'uyuni.master_select_minions': MagicMock(),
            'uyuni.systems_get_minion_id_map': MagicMock()}):
            with patch.dict(uyuni_config.__opts__, {'test': True}):
                result = uyuni_config.group_present('my_group', 'my group description',
                                                    target='*http*',
                                                    org_admin_user='org_admin_user',
                                                    org_admin_password='org_admin_password')
                assert result is not None
                assert result['name'] == 'my_group'
                assert result['result'] is None
                assert result['comment'] == 'my_group would be updated'

                assert result['changes'] == {'description': {'new': 'my group description'},
                                             'name': {'new': 'my_group'}}

                uyuni_config.__salt__['uyuni.systemgroup_get_details'].assert_called_once_with('my_group',
                                                                                               org_admin_user='org_admin_user',
                                                                                               org_admin_password='org_admin_password')

                uyuni_config.__salt__['uyuni.master_select_minions'].assert_called_once_with('*http*', 'glob')
                uyuni_config.__salt__['uyuni.systems_get_minion_id_map'].assert_called_once_with('org_admin_user',
                                                                                                'org_admin_password')

    def test_group_present_new_group_test(self):
        exc = Exception("Group not found")
        exc.faultCode = 2201

        with patch.dict(uyuni_config.__salt__, {
            'uyuni.systemgroup_get_details': MagicMock(side_effect=exc),
            'uyuni.master_select_minions': MagicMock(return_value={'minions': ['my_minion_1', 'my_minion_2']}),
            'uyuni.systems_get_minion_id_map': MagicMock(return_value={'my_minion_1': '10001'})}):
            with patch.dict(uyuni_config.__opts__, {'test': True}):
                result = uyuni_config.group_present('my_group', 'my group description',
                                                    target='*http*',
                                                    org_admin_user='org_admin_user',
                                                    org_admin_password='org_admin_password')
                assert result is not None
                assert result['name'] == 'my_group'
                assert result['result'] is None
                assert result['comment'] == 'my_group would be updated'

                assert result['changes'] == {'description': {'new': 'my group description'},
                                             'systems': {'new': ['10001']},
                                             'name': {'new': 'my_group'}}

                uyuni_config.__salt__['uyuni.systemgroup_get_details'].assert_called_once_with('my_group',
                                                                                               org_admin_user='org_admin_user',
                                                                                               org_admin_password='org_admin_password')

                uyuni_config.__salt__['uyuni.master_select_minions'].assert_called_once_with('*http*', 'glob')
                uyuni_config.__salt__['uyuni.systems_get_minion_id_map'].assert_called_once_with('org_admin_user',
                                                                                                'org_admin_password')

    def test_group_present_new_group(self):
        exc = Exception("Group not found")
        exc.faultCode = 2201

        with patch.dict(uyuni_config.__salt__, {
            'uyuni.systemgroup_get_details': MagicMock(side_effect=exc),
            'uyuni.master_select_minions': MagicMock(return_value={'minions': ['my_minion_1', 'my_minion_2']}),
            'uyuni.systems_get_minion_id_map': MagicMock(return_value={'my_minion_1': '10001'}),
            'uyuni.systemgroup_create': MagicMock(),
            'uyuni.systemgroup_add_remove_systems': MagicMock()}):
            result = uyuni_config.group_present('my_group', 'my group description',
                                                target='*http*',
                                                org_admin_user='org_admin_user',
                                                org_admin_password='org_admin_password')
            assert result is not None
            assert result['name'] == 'my_group'
            assert result['result'] is True
            assert result['comment'] == 'my_group successfully updated'

            assert result['changes'] == {'description': {'new': 'my group description'},
                                         'systems': {'new': ['10001']},
                                         'name': {'new': 'my_group'}}

            uyuni_config.__salt__['uyuni.systemgroup_get_details'].assert_called_once_with('my_group',
                                                                                           org_admin_user='org_admin_user',
                                                                                           org_admin_password='org_admin_password')

            uyuni_config.__salt__['uyuni.master_select_minions'].assert_called_once_with('*http*', 'glob')
            uyuni_config.__salt__['uyuni.systems_get_minion_id_map'].assert_called_once_with('org_admin_user',
                                                                                            'org_admin_password')

            uyuni_config.__salt__['uyuni.systemgroup_create'].assert_called_once_with('my_group', 'my group description',
                                                                                      org_admin_user='org_admin_user',
                                                                                      org_admin_password='org_admin_password')

            uyuni_config.__salt__['uyuni.systemgroup_add_remove_systems'].assert_called_once_with('my_group', True,
                                                                                                  ['10001'],
                                                                                                  org_admin_user='org_admin_user',
                                                                                                  org_admin_password='org_admin_password')

    def test_group_present_update_group(self):
        with patch.dict(uyuni_config.__salt__, {
            'uyuni.systemgroup_get_details': MagicMock(
                return_value={'description': 'old description', 'name': 'my_group'}),
            'uyuni.systemgroup_list_systems': MagicMock(return_value=[{'id': '10001'}, {'id': '10003'}]),
            'uyuni.master_select_minions': MagicMock(
                return_value={'minions': ['my_minion_1', 'my_minion_2', 'my_minion_4']}),
            'uyuni.systems_get_minion_id_map': MagicMock(return_value={'my_minion_1': '10001', 'my_minion_2': '10002'}),
            'uyuni.systemgroup_update': MagicMock(),
            'uyuni.systemgroup_add_remove_systems': MagicMock()}):
            result = uyuni_config.group_present('my_group', 'my group description',
                                                target='*http*',
                                                org_admin_user='org_admin_user',
                                                org_admin_password='org_admin_password')
            assert result is not None
            assert result['name'] == 'my_group'
            assert result['result']
            assert result['comment'] == 'my_group successfully updated'

            assert result['changes'] == {'description': {'new': 'my group description',
                                                         'old': 'old description'},
                                         'systems': {'new': ['10001', '10002'],
                                                     'old': ['10001', '10003']}}

            uyuni_config.__salt__['uyuni.systemgroup_get_details'].assert_called_once_with('my_group',
                                                                                           org_admin_user='org_admin_user',
                                                                                           org_admin_password='org_admin_password')

            uyuni_config.__salt__['uyuni.systemgroup_list_systems'].assert_called_once_with('my_group',
                                                                                            org_admin_user='org_admin_user',
                                                                                            org_admin_password='org_admin_password')

            uyuni_config.__salt__['uyuni.master_select_minions'].assert_called_once_with('*http*', 'glob')
            uyuni_config.__salt__['uyuni.systems_get_minion_id_map'].assert_called_once_with('org_admin_user',
                                                                                            'org_admin_password')

            uyuni_config.__salt__['uyuni.systemgroup_update'].assert_called_once_with('my_group', 'my group description',
                                                                                      org_admin_user='org_admin_user',
                                                                                      org_admin_password='org_admin_password')

            uyuni_config.__salt__['uyuni.systemgroup_add_remove_systems'].assert_has_calls([call('my_group', False,
                                                                                                 ['10003'],
                                                                                                 org_admin_user='org_admin_user',
                                                                                                 org_admin_password='org_admin_password'),
                                                                                            call('my_group', True,
                                                                                                ['10002'],
                                                                                                org_admin_user='org_admin_user',
                                                                                                org_admin_password='org_admin_password')])

    def test_group_absent_success_test(self):
        with patch.dict(uyuni_config.__salt__, {
            'uyuni.systemgroup_get_details': MagicMock(
                return_value={'description': 'description', 'name': 'my_group'})}):
            with patch.dict(uyuni_config.__opts__, {'test': True}):
                result = uyuni_config.group_absent('my_group',
                                                   org_admin_user='org_admin_user',
                                                   org_admin_password='org_admin_password')
                assert result is not None
                assert result['name'] == 'my_group'
                assert result['result'] is None
                assert result['comment'] == 'my_group would be removed'

                assert result['changes'] == {}
                uyuni_config.__salt__['uyuni.systemgroup_get_details'].assert_called_once_with('my_group',
                                                                                               org_admin_user='org_admin_user',
                                                                                               org_admin_password='org_admin_password')

    def test_group_absent_success(self):
        with patch.dict(uyuni_config.__salt__, {
            'uyuni.systemgroup_get_details': MagicMock(return_value={'description': 'description', 'name': 'my_group'}),
            'uyuni.systemgroup_delete': MagicMock(return_value=True)}):
            result = uyuni_config.group_absent('my_group',
                                               org_admin_user='org_admin_user',
                                               org_admin_password='org_admin_password')
            assert result is not None
            assert result['name'] == 'my_group'
            assert result['result']
            assert result['comment'] == 'Group my_group has been deleted'

            assert result['changes'] == {'description': {'old': 'description'},
                                         'name': {'old': 'my_group'}}
            uyuni_config.__salt__['uyuni.systemgroup_get_details'].assert_called_once_with('my_group',
                                                                                           org_admin_user='org_admin_user',
                                                                                           org_admin_password='org_admin_password')

            uyuni_config.__salt__['uyuni.systemgroup_delete'].assert_called_once_with('my_group',
                                                                                      org_admin_user='org_admin_user',
                                                                                      org_admin_password='org_admin_password')

    def test_group_absent_already_removed(self):
        exc = Exception("Group not found")
        exc.faultCode = 2201

        with patch.dict(uyuni_config.__salt__, {'uyuni.systemgroup_get_details': MagicMock(side_effect=exc)}):
            result = uyuni_config.group_absent('my_group',
                                               org_admin_user='org_admin_user',
                                               org_admin_password='org_admin_password')
            assert result is not None
            assert result['name'] == 'my_group'
            assert result['result']
            assert result['comment'] == 'my_group is already absent'

            assert result['changes'] == {}
            uyuni_config.__salt__['uyuni.systemgroup_get_details'].assert_called_once_with('my_group',
                                                                                           org_admin_user='org_admin_user',
                                                                                           org_admin_password='org_admin_password')


class TestManageOrgs:
    """Unit tests for the organization states of ``uyuni_config``:
    ``org_present`` and ``org_absent``.

    ``uyuni.*`` execution-module functions are mocked through
    ``patch.dict`` on ``__salt__``.
    """

    def test_org_present_new_org_test(self):
        """org_present with test=True for a missing org: reports the full
        set of would-be changes without creating anything."""
        # Exception carrying faultCode 2850 simulates the API's
        # "org not found" error; the state is expected to treat it as
        # "org does not exist yet".
        exc = Exception("org not found")
        exc.faultCode = 2850

        with patch.dict(uyuni_config.__salt__, {'uyuni.org_get_details': MagicMock(side_effect=exc)}):
            with patch.dict(uyuni_config.__opts__, {'test': True}):
                result = uyuni_config.org_present('my_org', 'org_admin_user', 'org_admin_password',
                                                 'First Name', 'Last Name', 'email@email.com',
                                                  admin_user='admin_user',
                                                  admin_password='admin_password')

                assert result is not None
                assert result['name'] == 'my_org'
                assert result['result'] is None
                assert result['comment'] == 'my_org would be updated'
                # pam defaults to False when not passed to org_present.
                assert result['changes'] == {'email': {'new': 'email@email.com'},
                                             'first_name': {'new': 'First Name'},
                                             'last_name': {'new': 'Last Name'},
                                             'org_admin_user': {'new': 'org_admin_user'},
                                             'org_name': {'new': 'my_org'},
                                             'pam': {'new': False}}
                uyuni_config.__salt__['uyuni.org_get_details'].assert_called_once_with('my_org',
                                                                                       admin_user='admin_user',
                                                                                       admin_password='admin_password')

    def test_org_present_new_org(self):
        """org_present for a missing org: creates it via uyuni.org_create
        with all admin-user details forwarded as keyword arguments."""
        # Simulated "org not found" (faultCode 2850) API error.
        exc = Exception("org not found")
        exc.faultCode = 2850

        with patch.dict(uyuni_config.__salt__, {
            'uyuni.org_get_details': MagicMock(side_effect=exc),
            'uyuni.org_create': MagicMock()}):
            result = uyuni_config.org_present('my_org', 'org_admin_user', 'org_admin_password',
                                             'First Name', 'Last Name', 'email@email.com',
                                              admin_user='admin_user',
                                              admin_password='admin_password')

            assert result is not None
            assert result['name'] == 'my_org'
            assert result['result']
            assert result['comment'] == 'my_org org successfully modified'
            assert result['changes'] == {'email': {'new': 'email@email.com'},
                                         'first_name': {'new': 'First Name'},
                                         'last_name': {'new': 'Last Name'},
                                         'org_admin_user': {'new': 'org_admin_user'},
                                         'org_name': {'new': 'my_org'},
                                         'pam': {'new': False}}
            uyuni_config.__salt__['uyuni.org_get_details'].assert_called_once_with('my_org',
                                                                                   admin_user='admin_user',
                                                                                   admin_password='admin_password')
            uyuni_config.__salt__['uyuni.org_create'].assert_called_once_with(name='my_org',
                                                                              org_admin_user="org_admin_user",
                                                                              org_admin_password="org_admin_password",
                                                                              first_name="First Name",
                                                                              last_name="Last Name",
                                                                              email="email@email.com",
                                                                              admin_user='admin_user',
                                                                              admin_password='admin_password',
                                                                              pam=False)

    def test_org_present_update_org(self):
        """org_present for an existing org whose admin-user details differ:
        updates the user via uyuni.user_set_details and reports old/new
        values for email, first and last name."""
        # NOTE(review): 'uui' looks like a typo for a login/uid key in the
        # fixture, but it is unreferenced by the assertions -- confirm
        # against the state implementation before renaming.
        current_user = {'uui': 'org_admin_user',
                        'email': 'old_mail@mail.com',
                        'first_name': 'first',
                        'last_name': 'last'}
        with patch.dict(uyuni_config.__salt__, {
            'uyuni.org_get_details': MagicMock(return_value={'id': 100, 'name': 'my_org'}),
            'uyuni.user_get_details': MagicMock(return_value=current_user),
            'uyuni.user_set_details': MagicMock()}):
            result = uyuni_config.org_present('my_org', 'org_admin_user', 'org_admin_password',
                                             'First Name', 'Last Name', 'email@email.com',
                                              admin_user='admin_user',
                                              admin_password='admin_password')

            assert result is not None
            assert result['name'] == 'my_org'
            assert result['result']
            assert result['comment'] == 'my_org org successfully modified'
            assert result['changes'] == {'email': {'new': 'email@email.com',
                                                   'old': 'old_mail@mail.com'},
                                         'first_name': {'new': 'First Name',
                                                        'old': 'first'},
                                         'last_name': {'new': 'Last Name',
                                                       'old': 'last'}}

            uyuni_config.__salt__['uyuni.org_get_details'].assert_called_once_with('my_org',
                                                                                   admin_user='admin_user',
                                                                                   admin_password='admin_password')
            uyuni_config.__salt__['uyuni.user_get_details'].assert_called_once_with('org_admin_user',
                                                                                    org_admin_user='org_admin_user',
                                                                                    org_admin_password='org_admin_password')

            uyuni_config.__salt__['uyuni.user_set_details'].assert_called_once_with(login='org_admin_user',
                                                                                    password='org_admin_password',
                                                                                    email='email@email.com',
                                                                                    first_name='First Name',
                                                                                    last_name='Last Name',
                                                                                    org_admin_user='org_admin_user',
                                                                                    org_admin_password='org_admin_password')

    def test_org_present_no_changes(self):
        """org_present when the org and its admin user already match the
        desired values: succeeds with empty changes."""
        current_user = {'uui': 'org_admin_user',
                        'email': 'email@email.com',
                        'first_name': 'First Name',
                        'last_name': 'Last Name'}
        with patch.dict(uyuni_config.__salt__, {
            'uyuni.org_get_details': MagicMock(return_value={'id': 100, 'name': 'my_org'}),
            'uyuni.user_get_details': MagicMock(return_value=current_user),
            'uyuni.user_set_details': MagicMock()}):
            result = uyuni_config.org_present('my_org', 'org_admin_user', 'org_admin_password',
                                             'First Name', 'Last Name', 'email@email.com',
                                              admin_user='admin_user',
                                              admin_password='admin_password')

            assert result is not None
            assert result['name'] == 'my_org'
            assert result['result']
            assert result['comment'] == 'my_org is already in the desired state'
            assert result['changes'] == {}

            uyuni_config.__salt__['uyuni.org_get_details'].assert_called_once_with('my_org',
                                                                                   admin_user='admin_user',
                                                                                   admin_password='admin_password')
            uyuni_config.__salt__['uyuni.user_get_details'].assert_called_once_with('org_admin_user',
                                                                                    org_admin_user='org_admin_user',
                                                                                    org_admin_password='org_admin_password')

    def test_org_absent_success_test(self):
        """org_absent with test=True for an existing org: only reports that
        the org would be removed."""
        with patch.dict(uyuni_config.__salt__,
                        {'uyuni.org_get_details': MagicMock(return_value={'id': 100, 'name': 'my_org'})}):
            with patch.dict(uyuni_config.__opts__, {'test': True}):
                result = uyuni_config.org_absent('my_org',
                                                 admin_user='admin_user',
                                                 admin_password='admin_password')

                assert result is not None
                assert result['name'] == 'my_org'
                assert result['result'] is None
                assert result['comment'] == 'my_org would be removed'
                assert result['changes'] == {}
                uyuni_config.__salt__['uyuni.org_get_details'].assert_called_once_with('my_org',
                                                                                       admin_user='admin_user',
                                                                                       admin_password='admin_password')

    def test_org_absent_success(self):
        """org_absent for an existing org: deletes it and reports the old
        name in changes."""
        with patch.dict(uyuni_config.__salt__, {
            'uyuni.org_get_details': MagicMock(return_value={'id': 100, 'name': 'my_org'}),
            'uyuni.org_delete': MagicMock()}):
            result = uyuni_config.org_absent('my_org',
                                             admin_user='admin_user',
                                             admin_password='admin_password')

            assert result is not None
            assert result['name'] == 'my_org'
            assert result['result']
            assert result['comment'] == 'Org my_org has been deleted'
            assert result['changes'] == {'name': {'old': 'my_org'}}
            uyuni_config.__salt__['uyuni.org_get_details'].assert_called_once_with('my_org',
                                                                                   admin_user='admin_user',
                                                                                   admin_password='admin_password')
            uyuni_config.__salt__['uyuni.org_delete'].assert_called_once_with('my_org',
                                                                              admin_user='admin_user',
                                                                              admin_password='admin_password')

    def test_org_absent_not_present(self):
        """org_absent for a missing org: succeeds with no changes
        ("already absent")."""
        # Simulated "org not found" (faultCode 2850) API error.
        exc = Exception("org not found")
        exc.faultCode = 2850

        with patch.dict(uyuni_config.__salt__, {'uyuni.org_get_details': MagicMock(side_effect=exc)}):
            result = uyuni_config.org_absent('my_org',
                                             admin_user='admin_user',
                                             admin_password='admin_password')

            assert result is not None
            assert result['name'] == 'my_org'
            assert result['result']
            assert result['comment'] == 'my_org is already absent'
            assert result['changes'] == {}
            uyuni_config.__salt__['uyuni.org_get_details'].assert_called_once_with('my_org',
                                                                                   admin_user='admin_user',
                                                                                   admin_password='admin_password')


class TestManageOrgsTrust:
    """Unit tests for the ``uyuni_config.org_trust`` state function.

    Each test builds named ``MagicMock`` objects for the ``uyuni.*``
    execution-module calls, installs them via ``patch.dict`` on
    ``__salt__``, and verifies the returned state dict plus the exact
    calls made on the mocks.
    """

    def test_org_trust_test(self):
        """With test=True, a pending trust addition is only reported."""
        trusts = [{'orgId': 2, 'orgName': 'new_org_1', 'trustEnabled': True},
                  {'orgId': 3, 'orgName': 'new_org_2', 'trustEnabled': False}]
        list_trusts = MagicMock(return_value=trusts)
        get_details = MagicMock(return_value={'id': 1, 'name': 'my_org'})
        salt_mocks = {'uyuni.org_trust_list_trusts': list_trusts,
                      'uyuni.org_get_details': get_details}

        with patch.dict(uyuni_config.__salt__, salt_mocks), \
                patch.dict(uyuni_config.__opts__, {'test': True}):
            result = uyuni_config.org_trust('state_name', 'my_org', ['new_org_1', 'new_org_2'],
                                            admin_user='admin_user',
                                            admin_password='admin_password')

        assert result is not None
        assert result['name'] == 'state_name'
        assert result['result'] is None
        assert result['comment'] == 'my_org would be created'
        # Only new_org_2 needs a trust; new_org_1 is already trusted.
        assert result['changes'] == {'new_org_2': {'new': True, 'old': None}}

        list_trusts.assert_called_once_with('my_org',
                                            admin_user='admin_user',
                                            admin_password='admin_password')
        get_details.assert_called_once_with('my_org',
                                            admin_user='admin_user',
                                            admin_password='admin_password')

    def test_org_trust_update(self):
        """A missing trust is added and a surplus one removed."""
        trusts = [{'orgId': 2, 'orgName': 'new_org_1', 'trustEnabled': True},
                  {'orgId': 3, 'orgName': 'new_org_2', 'trustEnabled': False},
                  {'orgId': 4, 'orgName': 'new_org_3', 'trustEnabled': True}]
        list_trusts = MagicMock(return_value=trusts)
        get_details = MagicMock(return_value={'id': 1, 'name': 'my_org'})
        add_trust = MagicMock(return_value=True)
        remove_trust = MagicMock(return_value=True)
        salt_mocks = {'uyuni.org_trust_list_trusts': list_trusts,
                      'uyuni.org_get_details': get_details,
                      'uyuni.org_trust_add_trust': add_trust,
                      'uyuni.org_trust_remove_trust': remove_trust}

        with patch.dict(uyuni_config.__salt__, salt_mocks):
            result = uyuni_config.org_trust('state_name', 'my_org', ['new_org_1', 'new_org_2'],
                                            admin_user='admin_user',
                                            admin_password='admin_password')

        assert result is not None
        assert result['name'] == 'state_name'
        assert result['result']
        assert result['comment'] == "Org 'my_org' trusts successfully modified"
        assert result['changes'] == {'new_org_2': {'new': True, 'old': None},
                                     'new_org_3': {'new': None, 'old': True}}

        list_trusts.assert_called_once_with('my_org',
                                            admin_user='admin_user',
                                            admin_password='admin_password')
        get_details.assert_called_once_with('my_org',
                                            admin_user='admin_user',
                                            admin_password='admin_password')
        # org 1 trusts org 3 (added) and stops trusting org 4 (removed).
        add_trust.assert_called_once_with(1, 3,
                                          admin_user='admin_user',
                                          admin_password='admin_password')
        remove_trust.assert_called_once_with(1, 4,
                                             admin_user='admin_user',
                                             admin_password='admin_password')

    def test_org_trust_no_changes(self):
        """Desired trusts already in place: nothing is modified."""
        trusts = [{'orgId': 2, 'orgName': 'new_org_1', 'trustEnabled': True},
                  {'orgId': 3, 'orgName': 'new_org_2', 'trustEnabled': True},
                  {'orgId': 4, 'orgName': 'new_org_3', 'trustEnabled': False}]
        list_trusts = MagicMock(return_value=trusts)
        get_details = MagicMock(return_value={'id': 1, 'name': 'my_org'})
        salt_mocks = {'uyuni.org_trust_list_trusts': list_trusts,
                      'uyuni.org_get_details': get_details}

        with patch.dict(uyuni_config.__salt__, salt_mocks):
            result = uyuni_config.org_trust('state_name', 'my_org', ['new_org_1', 'new_org_2'],
                                            admin_user='admin_user',
                                            admin_password='admin_password')

        assert result is not None
        assert result['name'] == 'state_name'
        assert result['result']
        assert result['comment'] == 'my_org is already in the desired state'
        assert result['changes'] == {}

        list_trusts.assert_called_once_with('my_org',
                                            admin_user='admin_user',
                                            admin_password='admin_password')
        get_details.assert_called_once_with('my_org',
                                            admin_user='admin_user',
                                            admin_password='admin_password')


class TestUyuniActivationKeys:
    """Tests for the uyuni_config activation key states.

    Covers activation_key_present (create / update / no-op, normal and
    ``test=True`` mode) and activation_key_absent.  The tests show that the
    state prefixes the key name with the caller's org id (org_id 1 + name
    'ak' -> key '1-ak').
    """

    # Smallest accepted input for activation_key_present: only name,
    # description and org admin credentials.
    MINIMAL_AK_PRESENT = {
        'name': 'ak',
        'description': 'ak description',
        'org_admin_user': 'admin',
        'org_admin_password': 'admin'
    }

    # Superset of MINIMAL_AK_PRESENT exercising every optional field the
    # state handles (channels, groups, packages, config channels, ...).
    FULL_AK_PRESENT = {
        **MINIMAL_AK_PRESENT,
        'base_channel': 'sles15SP2',
        'usage_limit': 10,
        'contact_method': 'ssh-push',
        'system_types': ['virtualization_host'],
        'universal_default': True,
        'child_channels': ['sles15SP2-tools'],
        'configuration_channels': ['my-channel'],
        'packages': [{'name': 'vim'}, {'name': 'emacs', 'arch': 'x86_64'}],
        'server_groups': ['my-group'],
        'configure_after_registration': True
    }

    # Returned by the mocked uyuni.user_get_details; org_id 1 produces the
    # '1-ak' key names asserted below.
    ORG_USER_DETAILS = {
        'org_id': 1
    }

    # All known system groups: 'my-group' is the desired one, 'old_group'
    # plays the role of a previously-assigned group to be removed.
    ALL_GROUPS = [
        {'name': 'my-group', 'id': 1},
        {'name': 'old_group', 'id': 2}
    ]

    def test_ak_present_create_minimal_data(self):
        """Create a new key from minimal data: every unset field is reported
        with its default in changes, and create + set_details are called."""
        exc = Exception("ak not found")
        # Fault code the state interprets as "activation key does not exist".
        exc.faultCode = -212

        with patch.dict(uyuni_config.__salt__, {
            'uyuni.systemgroup_list_all_groups': MagicMock(return_value=self.ALL_GROUPS),
            'uyuni.user_get_details': MagicMock(return_value=self.ORG_USER_DETAILS),
            'uyuni.activation_key_get_details': MagicMock(side_effect=exc),
            'uyuni.activation_key_create': MagicMock(),
            'uyuni.activation_key_set_details': MagicMock()}):

            result = uyuni_config.activation_key_present(**self.MINIMAL_AK_PRESENT)
            assert result is not None
            assert result['name'] == '1-ak'
            assert result['result']
            assert result['comment'] == '1-ak activation key successfully modified'
            assert result['changes'] == {
                'description': {'new': 'ak description'},
                'base_channel': {'new': ''},
                'usage_limit': {'new': 0},
                'universal_default': {'new': False},
                'contact_method': {'new': 'default'},
                'configure_after_registration': {'new': False},
                'key': {'new': '1-ak'}
            }
            uyuni_config.__salt__['uyuni.systemgroup_list_all_groups'].assert_called_once_with('admin', 'admin')
            uyuni_config.__salt__['uyuni.user_get_details'].assert_called_once_with('admin', 'admin')
            uyuni_config.__salt__['uyuni.activation_key_get_details'].assert_called_once_with('1-ak',
                                                                                              org_admin_user='admin',
                                                                                              org_admin_password='admin')

            call_values = {'description': self.MINIMAL_AK_PRESENT['description'],
                           'key': self.MINIMAL_AK_PRESENT['name'],
                           'base_channel_label': '',
                           'usage_limit': 0,
                           'system_types': [],
                           'universal_default': False,
                           'org_admin_user': self.MINIMAL_AK_PRESENT['org_admin_user'],
                           'org_admin_password': self.MINIMAL_AK_PRESENT['org_admin_password']}

            uyuni_config.__salt__['uyuni.activation_key_create'].assert_called_once_with(**call_values)
            uyuni_config.__salt__['uyuni.activation_key_set_details'].assert_called_once_with('1-ak',
                                                                                              contact_method='default',
                                                                                              usage_limit=0,
                                                                                              org_admin_user='admin',
                                                                                              org_admin_password='admin')

    def test_ak_present_create_full_data(self):
        """Create a new key from full data: all optional collections trigger
        their respective add/enable/set calls after creation."""
        exc = Exception("ak not found")
        # Fault code the state interprets as "activation key does not exist".
        exc.faultCode = -212

        with patch.dict(uyuni_config.__salt__, {
            'uyuni.systemgroup_list_all_groups': MagicMock(return_value=self.ALL_GROUPS),
            'uyuni.user_get_details': MagicMock(return_value=self.ORG_USER_DETAILS),
            'uyuni.activation_key_get_details': MagicMock(side_effect=exc),
            'uyuni.activation_key_create': MagicMock(),
            'uyuni.activation_key_set_details': MagicMock(),
            'uyuni.activation_key_add_child_channels': MagicMock(),
            'uyuni.activation_key_add_server_groups': MagicMock(),
            'uyuni.activation_key_add_packages': MagicMock(),
            'uyuni.activation_key_enable_config_deployment': MagicMock(),
            'uyuni.activation_key_set_config_channels': MagicMock(),
        }):

            result = uyuni_config.activation_key_present(**self.FULL_AK_PRESENT)
            assert result is not None
            assert result['name'] == '1-ak'
            assert result['result']
            assert result['comment'] == '1-ak activation key successfully modified'
            assert result['changes'] == {
                'description': {'new': 'ak description'},
                'base_channel': {'new': 'sles15SP2'},
                'usage_limit': {'new': 10},
                'universal_default': {'new': True},
                'contact_method': {'new': 'ssh-push'},
                'system_types': {'new': ['virtualization_host']},
                'child_channels': {'new': ['sles15SP2-tools']},
                'server_groups': {'new': ['my-group']},
                'packages': {'new': [{'name': 'vim'}, {'name': 'emacs', 'arch': 'x86_64'}]},
                'configure_after_registration': {'new': True},
                'configuration_channels': {'new': ['my-channel']},
                'key': {'new': '1-ak'}
            }
            uyuni_config.__salt__['uyuni.systemgroup_list_all_groups'].assert_called_once_with('admin', 'admin')
            uyuni_config.__salt__['uyuni.user_get_details'].assert_called_once_with('admin', 'admin')
            uyuni_config.__salt__['uyuni.activation_key_get_details'].assert_called_once_with('1-ak',
                                                                                              org_admin_user='admin',
                                                                                              org_admin_password='admin')

            uyuni_config.__salt__['uyuni.activation_key_create'].assert_called_once_with(
                description=self.FULL_AK_PRESENT['description'],
                key=self.FULL_AK_PRESENT['name'],
                base_channel_label=self.FULL_AK_PRESENT['base_channel'],
                usage_limit=self.FULL_AK_PRESENT['usage_limit'],
                system_types=self.FULL_AK_PRESENT['system_types'],
                universal_default=self.FULL_AK_PRESENT['universal_default'],
                org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                org_admin_password=self.FULL_AK_PRESENT['org_admin_password']
            )
            uyuni_config.__salt__['uyuni.activation_key_set_details'].assert_called_once_with('1-ak',
                                                                                              contact_method=self.FULL_AK_PRESENT['contact_method'],
                                                                                              usage_limit=self.FULL_AK_PRESENT['usage_limit'],
                                                                                              org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                              org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])

            uyuni_config.__salt__['uyuni.activation_key_add_child_channels'].assert_called_once_with('1-ak',
                                                                                                      self.FULL_AK_PRESENT['child_channels'],
                                                                                                      org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                      org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])

            # Server groups are passed by id, resolved through ALL_GROUPS
            # ('my-group' -> 1).
            uyuni_config.__salt__['uyuni.activation_key_add_server_groups'].assert_called_once_with('1-ak',
                                                                                                    [1],
                                                                                                    org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                    org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])

            uyuni_config.__salt__['uyuni.activation_key_add_packages'].assert_called_once_with('1-ak',
                                                                                                self.FULL_AK_PRESENT['packages'],
                                                                                                org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])

            uyuni_config.__salt__['uyuni.activation_key_enable_config_deployment'].assert_called_once_with('1-ak',
                                                                                                            org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                            org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])

            # Note: set_config_channels takes a list of keys as first argument.
            uyuni_config.__salt__['uyuni.activation_key_set_config_channels'].assert_called_once_with(['1-ak'],
                                                                                                      config_channel_label=self.FULL_AK_PRESENT['configuration_channels'],
                                                                                                      org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                      org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])

    def test_ak_present_create_full_data_test(self):
        """With __opts__['test'] = True the state only predicts the changes:
        result is None, comment says 'would be updated', no module calls
        beyond the initial lookups."""
        exc = Exception("ak not found")
        # Fault code the state interprets as "activation key does not exist".
        exc.faultCode = -212

        with patch.dict(uyuni_config.__salt__, {
            'uyuni.systemgroup_list_all_groups': MagicMock(return_value=self.ALL_GROUPS),
            'uyuni.user_get_details': MagicMock(return_value=self.ORG_USER_DETAILS),
            'uyuni.activation_key_get_details': MagicMock(side_effect=exc)
        }):
            with patch.dict(uyuni_config.__opts__, {'test': True}):

                result = uyuni_config.activation_key_present(**self.FULL_AK_PRESENT)

                assert result is not None
                assert result['name'] == '1-ak'
                assert result['result'] is None
                assert result['comment'] == '1-ak would be updated'

                assert result['changes'] == {
                    'description': {'new': 'ak description'},
                    'base_channel': {'new': 'sles15SP2'},
                    'usage_limit': {'new': 10},
                    'universal_default': {'new': True},
                    'contact_method': {'new': 'ssh-push'},
                    'system_types': {'new': ['virtualization_host']},
                    'child_channels': {'new': ['sles15SP2-tools']},
                    'server_groups': {'new': ['my-group']},
                    'packages': {'new': [{'name': 'vim'}, {'name': 'emacs', 'arch': 'x86_64'}]},
                    'configure_after_registration': {'new': True},
                    'configuration_channels': {'new': ['my-channel']},
                    'key': {'new': '1-ak'}
                }
                uyuni_config.__salt__['uyuni.systemgroup_list_all_groups'].assert_called_once_with('admin', 'admin')
                uyuni_config.__salt__['uyuni.user_get_details'].assert_called_once_with('admin', 'admin')
                uyuni_config.__salt__['uyuni.activation_key_get_details'].assert_called_once_with('1-ak',
                                                                                                  org_admin_user='admin',
                                                                                                  org_admin_password='admin')

    def test_ak_present_update_minimal_data(self):
        """Update an existing key where only the description differs: changes
        contains just that field, yet set_details is called with the full
        (defaulted) field set."""
        # Existing key as returned by the server; only 'description' differs
        # from MINIMAL_AK_PRESENT.
        return_ak = {
            'description': 'old description',
            'base_channel_label': 'none',
            'usage_limit': 0,
            'universal_default': False,
            'contact_method': 'default',
            'entitlements': [],
            'child_channel_labels': [],
            'server_group_ids': [],
            'packages': []
        }
        with patch.dict(uyuni_config.__salt__, {
            'uyuni.systemgroup_list_all_groups': MagicMock(return_value=self.ALL_GROUPS),
            'uyuni.user_get_details': MagicMock(return_value=self.ORG_USER_DETAILS),
            'uyuni.activation_key_get_details': MagicMock(return_value=return_ak),
            'uyuni.activation_key_check_config_deployment': MagicMock(return_value=False),
            'uyuni.activation_key_list_config_channels': MagicMock(return_value=[]),
            'uyuni.activation_key_set_details': MagicMock()
        }):

            result = uyuni_config.activation_key_present(**self.MINIMAL_AK_PRESENT)
            assert result is not None
            assert result['name'] == '1-ak'
            assert result['result']
            assert result['comment'] == '1-ak activation key successfully modified'
            assert result['changes'] == {'description': {'new': 'ak description', 'old': 'old description'}}

            uyuni_config.__salt__['uyuni.systemgroup_list_all_groups'].assert_called_once_with('admin', 'admin')
            uyuni_config.__salt__['uyuni.user_get_details'].assert_called_once_with('admin', 'admin')
            uyuni_config.__salt__['uyuni.activation_key_get_details'].assert_called_once_with('1-ak',
                                                                                              org_admin_user='admin',
                                                                                              org_admin_password='admin')
            # These two helpers take the credentials positionally.
            uyuni_config.__salt__['uyuni.activation_key_check_config_deployment'].assert_called_once_with('1-ak','admin','admin')
            uyuni_config.__salt__['uyuni.activation_key_list_config_channels'].assert_called_once_with('1-ak','admin','admin')

            uyuni_config.__salt__['uyuni.activation_key_set_details'].assert_called_once_with('1-ak',
                                                                                              description=self.MINIMAL_AK_PRESENT['description'],
                                                                                              contact_method='default',
                                                                                              base_channel_label='',
                                                                                              usage_limit=0,
                                                                                              universal_default=False,
                                                                                              org_admin_user=self.MINIMAL_AK_PRESENT['org_admin_user'],
                                                                                              org_admin_password=self.MINIMAL_AK_PRESENT['org_admin_password'])

    def test_ak_present_no_changes_minimal_data(self):
        """Existing key already matches the minimal desired state: no changes,
        'already in the desired state' comment."""
        return_ak = {
            'description': self.MINIMAL_AK_PRESENT['description'],
            'base_channel_label': 'none',
            'usage_limit': 0,
            'universal_default': False,
            'contact_method': 'default',
            'entitlements': [],
            'child_channel_labels': [],
            'server_group_ids': [],
            'packages': []
        }
        with patch.dict(uyuni_config.__salt__, {
            'uyuni.systemgroup_list_all_groups': MagicMock(return_value=self.ALL_GROUPS),
            'uyuni.user_get_details': MagicMock(return_value=self.ORG_USER_DETAILS),
            'uyuni.activation_key_get_details': MagicMock(return_value=return_ak),
            'uyuni.activation_key_check_config_deployment': MagicMock(return_value=False),
            'uyuni.activation_key_list_config_channels': MagicMock(return_value=[]),
            'uyuni.activation_key_set_details': MagicMock()
        }):

            result = uyuni_config.activation_key_present(**self.MINIMAL_AK_PRESENT)
            assert result is not None
            assert result['name'] == '1-ak'
            assert result['result']
            assert result['comment'] == '1-ak is already in the desired state'
            assert result['changes'] == {}

    def test_ak_present_update_full_data(self):
        """Update where every field differs: each collection gets a matching
        add/remove pair (entitlements, child channels, server groups,
        packages), plus config deployment enable and config channel set."""

        # Existing key whose every field differs from FULL_AK_PRESENT; its
        # collections hold the 'old' values expected to be removed.
        return_ak = {
            'description': 'old description',
            'base_channel_label': 'base_channel',
            'usage_limit': 0,
            'universal_default': False,
            'contact_method': 'default',
            'entitlements': ['container_build_host'],
            'child_channel_labels': ['child_channel'],
            'server_group_ids': [2],
            'packages': [{'name': 'pkg', 'arch': 'x86_63'}]
        }
        with patch.dict(uyuni_config.__salt__, {
            'uyuni.systemgroup_list_all_groups': MagicMock(return_value=self.ALL_GROUPS),
            'uyuni.user_get_details': MagicMock(return_value=self.ORG_USER_DETAILS),
            'uyuni.activation_key_get_details': MagicMock(return_value=return_ak),
            'uyuni.activation_key_check_config_deployment': MagicMock(return_value=False),
            'uyuni.activation_key_list_config_channels': MagicMock(return_value=[{'label': 'old_config'}]),
            'uyuni.activation_key_set_details': MagicMock(),
            'uyuni.activation_key_add_entitlements': MagicMock(),
            'uyuni.activation_key_remove_entitlements': MagicMock(),
            'uyuni.activation_key_add_child_channels': MagicMock(),
            'uyuni.activation_key_remove_child_channels': MagicMock(),
            'uyuni.activation_key_add_server_groups': MagicMock(),
            'uyuni.activation_key_remove_server_groups': MagicMock(),
            'uyuni.activation_key_add_packages': MagicMock(),
            'uyuni.activation_key_remove_packages': MagicMock(),
            'uyuni.activation_key_enable_config_deployment': MagicMock(),
            'uyuni.activation_key_set_config_channels': MagicMock(),
        }):

            result = uyuni_config.activation_key_present(**self.FULL_AK_PRESENT)
            assert result is not None
            assert result['name'] == '1-ak'
            assert result['result']
            assert result['comment'] == '1-ak activation key successfully modified'
            assert result['changes'] == {'description': {'new': 'ak description', 'old': 'old description'},
                                         'base_channel': {'new': 'sles15SP2', 'old': 'base_channel'},
                                         'usage_limit': {'new': 10, 'old': 0},
                                         'universal_default': {'new': True, 'old': False},
                                         'contact_method': {'new': 'ssh-push', 'old': 'default'},
                                         'system_types': {'new': ['virtualization_host'],
                                                          'old': ['container_build_host']},
                                         'child_channels': {'new': ['sles15SP2-tools'], 'old': ['child_channel']},
                                         'server_groups': {'new': ['my-group'], 'old': ['old_group']},
                                         'packages': {'new': [{'name': 'vim'}, {'name': 'emacs', 'arch': 'x86_64'}],
                                                      'old': [{'name': 'pkg', 'arch': 'x86_63'}]},
                                         'configure_after_registration': {'new': True, 'old': False},
                                         'configuration_channels': {'new': ['my-channel'], 'old': ['old_config']}}

            uyuni_config.__salt__['uyuni.systemgroup_list_all_groups'].assert_called_once_with('admin', 'admin')
            uyuni_config.__salt__['uyuni.user_get_details'].assert_called_once_with('admin', 'admin')
            uyuni_config.__salt__['uyuni.activation_key_get_details'].assert_called_once_with('1-ak',
                                                                                              org_admin_user='admin',
                                                                                              org_admin_password='admin')
            # These two helpers take the credentials positionally.
            uyuni_config.__salt__['uyuni.activation_key_check_config_deployment'].assert_called_once_with('1-ak','admin','admin')
            uyuni_config.__salt__['uyuni.activation_key_list_config_channels'].assert_called_once_with('1-ak','admin','admin')

            uyuni_config.__salt__['uyuni.activation_key_set_details'].assert_called_once_with('1-ak',
                                                                                              description=self.FULL_AK_PRESENT['description'],
                                                                                              contact_method=self.FULL_AK_PRESENT['contact_method'],
                                                                                              base_channel_label=self.FULL_AK_PRESENT['base_channel'],
                                                                                              usage_limit=self.FULL_AK_PRESENT['usage_limit'],
                                                                                              universal_default=self.FULL_AK_PRESENT['universal_default'],
                                                                                              org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                              org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])

            uyuni_config.__salt__['uyuni.activation_key_add_entitlements'].assert_called_once_with('1-ak',
                                                                                                   self.FULL_AK_PRESENT['system_types'],
                                                                                                   org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                   org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])
            uyuni_config.__salt__['uyuni.activation_key_remove_entitlements'].assert_called_once_with('1-ak',
                                                                                                      ['container_build_host'],
                                                                                                      org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                      org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])

            uyuni_config.__salt__['uyuni.activation_key_add_child_channels'].assert_called_once_with('1-ak',
                                                                                                     self.FULL_AK_PRESENT['child_channels'],
                                                                                                     org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                     org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])
            uyuni_config.__salt__['uyuni.activation_key_remove_child_channels'].assert_called_once_with('1-ak',
                                                                                                        ['child_channel'],
                                                                                                        org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                        org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])

            # Group names are resolved to ids via ALL_GROUPS:
            # add 'my-group' (id 1), remove the pre-existing id 2 ('old_group').
            uyuni_config.__salt__['uyuni.activation_key_add_server_groups'].assert_called_once_with('1-ak',
                                                                                                    [1],
                                                                                                    org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                    org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])
            uyuni_config.__salt__['uyuni.activation_key_remove_server_groups'].assert_called_once_with('1-ak',
                                                                                                       [2],
                                                                                                       org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                       org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])

            uyuni_config.__salt__['uyuni.activation_key_add_packages'].assert_called_once_with('1-ak',
                                                                                               self.FULL_AK_PRESENT['packages'],
                                                                                               org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                               org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])
            uyuni_config.__salt__['uyuni.activation_key_remove_packages'].assert_called_once_with('1-ak',
                                                                                                  [{'name': 'pkg', 'arch': 'x86_63'}],
                                                                                                  org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                  org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])

            uyuni_config.__salt__['uyuni.activation_key_enable_config_deployment'].assert_called_once_with('1-ak',
                                                                                                           org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                           org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])

            uyuni_config.__salt__['uyuni.activation_key_set_config_channels'].assert_called_once_with(['1-ak'],
                                                                                                      config_channel_label=self.FULL_AK_PRESENT['configuration_channels'],
                                                                                                      org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                      org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])

    def test_ak_present_no_changes_full_data(self):
        """Existing key already matches the full desired state (groups by id,
        config deployment on, config channel set): no changes reported."""

        return_ak = {
            'description': self.FULL_AK_PRESENT['description'],
            'base_channel_label': self.FULL_AK_PRESENT['base_channel'],
            'usage_limit': self.FULL_AK_PRESENT['usage_limit'],
            'universal_default': self.FULL_AK_PRESENT['universal_default'],
            'contact_method': self.FULL_AK_PRESENT['contact_method'],
            'entitlements': self.FULL_AK_PRESENT['system_types'],
            'child_channel_labels': self.FULL_AK_PRESENT['child_channels'],
            'server_group_ids': [1],
            'packages': self.FULL_AK_PRESENT['packages']
        }
        with patch.dict(uyuni_config.__salt__, {
            'uyuni.systemgroup_list_all_groups': MagicMock(return_value=self.ALL_GROUPS),
            'uyuni.user_get_details': MagicMock(return_value=self.ORG_USER_DETAILS),
            'uyuni.activation_key_get_details': MagicMock(return_value=return_ak),
            'uyuni.activation_key_check_config_deployment': MagicMock(return_value=True),
            'uyuni.activation_key_list_config_channels': MagicMock(return_value=[{'label': 'my-channel'}]),
        }):

            result = uyuni_config.activation_key_present(**self.FULL_AK_PRESENT)
            assert result is not None
            assert result['name'] == '1-ak'
            assert result['result']
            assert result['comment'] == '1-ak is already in the desired state'
            assert result['changes'] == {}

            uyuni_config.__salt__['uyuni.systemgroup_list_all_groups'].assert_called_once_with('admin', 'admin')
            uyuni_config.__salt__['uyuni.user_get_details'].assert_called_once_with('admin', 'admin')
            uyuni_config.__salt__['uyuni.activation_key_get_details'].assert_called_once_with('1-ak',
                                                                                              org_admin_user='admin',
                                                                                              org_admin_password='admin')
            # These two helpers take the credentials positionally.
            uyuni_config.__salt__['uyuni.activation_key_check_config_deployment'].assert_called_once_with('1-ak','admin','admin')
            uyuni_config.__salt__['uyuni.activation_key_list_config_channels'].assert_called_once_with('1-ak','admin','admin')

    def test_ak_absent_not_present(self):
        """activation_key_absent on a missing key succeeds with no changes."""
        exc = Exception("ak not found")
        # Fault code the state interprets as "activation key does not exist".
        exc.faultCode = -212

        with patch.dict(uyuni_config.__salt__, {'uyuni.user_get_details': MagicMock(return_value=self.ORG_USER_DETAILS),
                                                'uyuni.activation_key_get_details': MagicMock(side_effect=exc)}):
            result = uyuni_config.activation_key_absent('ak',
                                                        org_admin_user='org_admin_user',
                                                        org_admin_password='org_admin_password')

            assert result is not None
            assert result['name'] == 'ak'
            assert result['result']
            assert result['comment'] == '1-ak is already absent'
            assert result['changes'] == {}
            uyuni_config.__salt__['uyuni.user_get_details'].assert_called_once_with('org_admin_user', 'org_admin_password')
            uyuni_config.__salt__['uyuni.activation_key_get_details'].assert_called_once_with('1-ak',
                                                                                              org_admin_user='org_admin_user',
                                                                                              org_admin_password='org_admin_password')

    def test_ak_absent_present(self):
        """activation_key_absent on an existing key deletes it and reports
        the removed id in changes."""

        with patch.dict(uyuni_config.__salt__, {
            'uyuni.user_get_details': MagicMock(return_value=self.ORG_USER_DETAILS),
            'uyuni.activation_key_get_details': MagicMock(return_value={}),
            'uyuni.activation_key_delete': MagicMock()}):

            result = uyuni_config.activation_key_absent('ak',
                                                        org_admin_user='org_admin_user',
                                                        org_admin_password='org_admin_password')

            assert result is not None
            assert result['name'] == 'ak'
            assert result['result']
            assert result['comment'] == 'Activation Key 1-ak has been deleted'
            assert result['changes'] == {'id': {'old': '1-ak'}}
            uyuni_config.__salt__['uyuni.user_get_details'].assert_called_once_with('org_admin_user', 'org_admin_password')
            uyuni_config.__salt__['uyuni.activation_key_get_details'].assert_called_once_with('1-ak',
                                                                                              org_admin_user='org_admin_user',
                                                                                              org_admin_password='org_admin_password')
            uyuni_config.__salt__['uyuni.activation_key_delete'].assert_called_once_with('1-ak',
                                                                                          org_admin_user='org_admin_user',
                                                                                          org_admin_password='org_admin_password')
   070701000000F2000081B400000000000000000000000163F87E3000001FEE000000000000000000000000000000000000003300000000susemanager-sls/src/tests/test_state_virt_utils.py    from mock import MagicMock, patch, call
from . import mockery

mockery.setup_environment()
import pytest

from ..states import virt_utils

# Mock globals
# Replace the Salt-injected dunders and the logger so the state module
# can be imported and exercised outside of a running minion.
virt_utils.log = MagicMock()
virt_utils.__salt__ = {}
virt_utils.__grains__ = {}
virt_utils.__opts__ = {}

# Fixture returned by the mocked virt.network_info call:
# net0/net1 are active, net2/net3 are inactive (need starting).
TEST_NETS = {
    "net0": {"active": True},
    "net1": {"active": True},
    "net2": {"active": False},
    "net3": {"active": False},
}

# Fixture returned by the mocked virt.pool_info call:
# pool0/pool1 are running (only refreshed), pool2/pool3 are stopped.
TEST_POOLS = {
    "pool0": {"state": "running"},
    "pool1": {"state": "running"},
    "pool2": {"state": "stopped"},
    "pool3": {"state": "stopped"},
}


@pytest.mark.parametrize("test", [False, True])
def test_network_running(test):
    """
    test the network_running function with only one name

    Runs in both dry-run (test=True) and real (test=False) modes:
    in dry-run mode nothing must be started and the result is None.
    """
    with patch.dict(virt_utils.__opts__, {"test": test}):
        start_mock = MagicMock(return_value=True)
        with patch.dict(
            virt_utils.__salt__,
            {
                "virt.network_info": MagicMock(return_value=TEST_NETS),
                "virt.network_start": start_mock,
            },
        ):
            ret = virt_utils.network_running(name="net2")
            if test:
                assert ret["result"] is None
                start_mock.assert_not_called()
            else:
                assert ret["result"]
                start_mock.assert_called_with("net2")
            assert ret["comment"] == "net2 network has been started"
            # Consistency with test_pool_running/test_network_multiple:
            # the (pending) start must also be reported in the changes.
            assert ret["changes"] == {"net2": "started"}


@pytest.mark.parametrize("test", [False, True])
def test_network_multiple(test):
    """
    test the network_running function with several names
    """
    start_mock = MagicMock(return_value=True)
    salt_mocks = {
        "virt.network_info": MagicMock(return_value=TEST_NETS),
        "virt.network_start": start_mock,
    }
    with patch.dict(virt_utils.__opts__, {"test": test}), patch.dict(
        virt_utils.__salt__, salt_mocks
    ):
        ret = virt_utils.network_running(
            name="the-state-id", networks=["net0", "net1", "net2", "net3"]
        )
        if not test:
            # Only the inactive networks are started
            assert ret["result"]
            assert start_mock.mock_calls == [call("net2"), call("net3")]
        else:
            assert ret["result"] is None
            start_mock.assert_not_called()
        assert ret["comment"] == "net2, net3 networks have been started"
        assert ret["changes"] == {"net2": "started", "net3": "started"}


def test_network_missing():
    """
    test the network_running function with names of missing networks
    """
    start_mock = MagicMock(return_value=True)
    salt_mocks = {
        "virt.network_info": MagicMock(return_value=TEST_NETS),
        "virt.network_start": start_mock,
    }
    with patch.dict(virt_utils.__opts__, {"test": True}), patch.dict(
        virt_utils.__salt__, salt_mocks
    ):
        ret = virt_utils.network_running(
            name="the-state-id", networks=["net0", "net1", "net2", "net5"]
        )
        # net5 is undefined: the state fails without starting anything
        assert not ret["result"]
        start_mock.assert_not_called()
        assert ret["comment"] == "net5 network is not defined"
        assert ret["changes"] == {}


@pytest.mark.parametrize("test", [False, True])
def test_pool_running(test):
    """
    test the pool_running function with only one name
    """
    start_mock = MagicMock(return_value=True)
    refresh_mock = MagicMock(return_value=True)
    salt_mocks = {
        "virt.pool_info": MagicMock(return_value=TEST_POOLS),
        "virt.pool_start": start_mock,
        "virt.pool_refresh": refresh_mock,
    }
    with patch.dict(virt_utils.__opts__, {"test": test}), patch.dict(
        virt_utils.__salt__, salt_mocks
    ):
        ret = virt_utils.pool_running(name="pool2")
        if not test:
            assert ret["result"]
            start_mock.assert_called_with("pool2")
        else:
            assert ret["result"] is None
            start_mock.assert_not_called()
        # A stopped pool is only started, never refreshed
        refresh_mock.assert_not_called()
        assert ret["comment"] == "pool2 pool has been started"
        assert ret["changes"] == {"pool2": "started"}


@pytest.mark.parametrize("test", [False, True])
def test_pool_multiple(test):
    """
    test the pool_running function with several names
    """
    start_mock = MagicMock(return_value=True)
    refresh_mock = MagicMock(return_value=True)
    salt_mocks = {
        "virt.pool_info": MagicMock(return_value=TEST_POOLS),
        "virt.pool_start": start_mock,
        "virt.pool_refresh": refresh_mock,
    }
    with patch.dict(virt_utils.__opts__, {"test": test}), patch.dict(
        virt_utils.__salt__, salt_mocks
    ):
        ret = virt_utils.pool_running(
            name="the-state-id", pools=["pool0", "pool1", "pool2", "pool3"]
        )
        if not test:
            # Stopped pools get started, already-running ones only refreshed
            assert ret["result"]
            assert start_mock.mock_calls == [call("pool2"), call("pool3")]
            assert refresh_mock.mock_calls == [call("pool0"), call("pool1")]
        else:
            assert ret["result"] is None
            start_mock.assert_not_called()
        assert ret["comment"] == "pool2, pool3 pools have been started"
        assert ret["changes"] == {
            "pool0": "refreshed",
            "pool1": "refreshed",
            "pool2": "started",
            "pool3": "started",
        }


def test_pool_missing():
    """
    test the pool_running function with names of undefined pools
    """
    start_mock = MagicMock(return_value=True)
    salt_mocks = {
        "virt.pool_info": MagicMock(return_value=TEST_POOLS),
        "virt.pool_start": start_mock,
    }
    with patch.dict(virt_utils.__opts__, {"test": True}), patch.dict(
        virt_utils.__salt__, salt_mocks
    ):
        ret = virt_utils.pool_running(
            name="the-state-id", pools=["pool0", "pool1", "pool2", "pool5"]
        )
        # pool5 is undefined: the state fails without starting anything
        assert not ret["result"]
        start_mock.assert_not_called()
        assert ret["comment"] == "pool5 pool is not defined"
        assert ret["changes"] == {}


def test_vm_resources_running():
    """
    test the vm_resources_running function
    """
    start_net_mock = MagicMock(return_value=True)
    start_pool_mock = MagicMock(return_value=True)
    refresh_pool_mock = MagicMock(return_value=True)
    # vm1 references one inactive network (net3), one stopped pool (pool2),
    # one running pool (pool0) and a plain file-backed disk (untouched).
    vm_info = {
        "vm1": {
            "nics": {
                "nic0": {"type": "network", "source": {"network": "net0"}},
                "nic1": {"type": "network", "source": {"network": "net3"}},
            },
            "disks": {
                "disk0": {"file": "pool0/system"},
                "disk1": {"file": "pool2/data"},
                "disk2": {"file": "/foo/bar.qcow2"},
            },
        },
    }
    salt_mocks = {
        "virt.network_info": MagicMock(return_value=TEST_NETS),
        "virt.pool_info": MagicMock(return_value=TEST_POOLS),
        "virt.network_start": start_net_mock,
        "virt.pool_start": start_pool_mock,
        "virt.pool_refresh": refresh_pool_mock,
        "virt.vm_info": MagicMock(return_value=vm_info),
    }
    with patch.dict(virt_utils.__opts__, {"test": False}), patch.dict(
        virt_utils.__salt__, salt_mocks
    ):
        ret = virt_utils.vm_resources_running("vm1")
        assert ret["result"]
        assert ret["changes"] == {
            "networks": {"net3": "started"},
            "pools": {"pool0": "refreshed", "pool2": "started"},
        }
        assert start_net_mock.mock_calls == [call("net3")]
        assert start_pool_mock.mock_calls == [call("pool2")]
        assert refresh_pool_mock.mock_calls == [call("pool0")]
  070701000000F3000081B400000000000000000000000163F87E300000A2D6000000000000000000000000000000000000002800000000susemanager-sls/susemanager-sls.changes   -------------------------------------------------------------------
Fri Feb 24 10:05:50 CET 2023 - jgonzalez@suse.com

- version 4.2.31-1
  * Flush uyuni roster cache if the config has changed
  * Implement uyuni roster module for Salt (bsc#1200096)

-------------------------------------------------------------------
Thu Feb 16 11:51:26 CET 2023 - jgonzalez@suse.com

- version 4.2.30-1
  * Fix dnf plugin path calculation when using Salt Bundle (bsc#1208335)

-------------------------------------------------------------------
Thu Feb 09 09:46:22 CET 2023 - jgonzalez@suse.com

- version 4.2.29-1
  * Improve _mgractionchains.conf logs
  * Prevent possible errors from "mgractionchains" module when there is no action chain to resume.
  * Fix mgrnet custom module to be compatible with old Python 2.6 (bsc#1206979) (bsc#1206981)
  * Fix custom "mgrcompat.module_run" state module to work with Salt 3005.1
  * filter out libvirt engine events (bsc#1206146)
  * Optimize the number of salt calls on minion startup (bsc#1203532)
  * Updated logrotate configuration (bsc#1206470)
  * Make libvirt-events.conf path depend on what minion is used (bsc#1205920)
  * Fix kiwi inspect regexp to allow image names with "-" (bsc#1204541)
  * Avoid installing recommended packages from assigned products (bsc#1204330)
  * Manager reboot in transactional update action chain (bsc#1201476)
  * Use the actual sudo user home directory for salt ssh
    clients on bootstrap and clean up (bsc#1202093)
  * Perform refresh with packages.pkgupdate state (bsc#1203884)

-------------------------------------------------------------------
Tue Oct 18 10:05:44 CEST 2022 - jgonzalez@suse.com

- version 4.2.28-1
  * Fix mgrnet availability check
  * Remove dependence on Kiwi libraries
  * Use mgrnet.dns_fqdns module to improve FQDN detection (bsc#1199726)
  * Add mgrnet salt module with mgrnet.dns_fqdn function implementation
    allowing to get all possible FQDNs from DNS (bsc#1199726)

-------------------------------------------------------------------
Thu Sep 08 09:33:53 CEST 2022 - mc@suse.de

- version 4.2.27-1
  * Copy grains file with util.mgr_switch_to_venv_minion state apply
  * Remove the message 'rpm: command not found' on using Salt SSH
    with Debian based systems which has no Salt Bundle
  * Prevent possible tracebacks on calling module.run from mgrcompat
    by setting proper globals with using LazyLoader
  * Fix deploy of SLE Micro CA Certificate (bsc#1200276)

-------------------------------------------------------------------
Tue Jul 26 13:41:02 CEST 2022 - jmassaguerpla@suse.de

- version 4.2.26-1
  * Fix bootstrap issue with Debian 9 because of missing python3-contextvars (bsc#1201782)

-------------------------------------------------------------------
Wed Jul 06 15:56:58 CEST 2022 - jmassaguerpla@suse.de

- version 4.2.25-1
  * use RES bootstrap repo as a fallback for Red Hat downstream OS (bsc#1200087)
  * Add support to packages.pkgremove to deal with duplicated pkg names (bsc#1198686)
  * do not install products and gpg keys when performing distupgrade
    dry-run (bsc#1199466)
  * Fix deprecated warning when getting pillar data (bsc#1192850)
  * remove unknown repository flags on EL
  * add packages.pkgupdate state (bsc#1197507)

-------------------------------------------------------------------
Mon Jun 20 18:19:05 CEST 2022 - jmassaguerpla@suse.de

- version 4.2.24-1
  * Manage the correct minion config file when venv-salt-minion is installed (bsc#1200703)
  * Fix bootstrapping for Ubuntu 18.04 with classic Salt package (bsc#1200707)

-------------------------------------------------------------------
Mon May 09 17:48:00 CEST 2022 - jmassaguerpla@suse.de

- version 4.2.23-1
  * Fix bootstrap repository URL resolution for Yum based clients
    with preflight script for Salt SSH

-------------------------------------------------------------------
Wed May 04 14:09:02 CEST 2022 - jmassaguerpla@suse.de

- version 4.2.22-1
  * Add Salt Bundle support on bootstrapping
  * Add Salt SSH with Salt Bundle support
  * Add util.mgr_switch_to_venv_minion state to switch salt minions
    to use the Salt Bundle
  * Fix bootstrap repository path resolution for Oracle Linux
  * Handle salt bundle in set_proxy.sls

-------------------------------------------------------------------
Tue Mar 29 14:23:29 CEST 2022 - jmassaguerpla@suse.de

- version 4.2.21-1
  * Improve `pkgset` beacon with using `salt.cache`
    to notify about the changes made while the minion was stopped
  * Align the code of pkgset beacon to prevent warnings (bsc#1194464)
  * fixing how the return code is returned in mgrutil runner (bsc#1194909)
  * Fix errors on calling sed -E ... by force_restart_minion
    with action chains
  * Avoid using lscpu -J option in grains (bsc#1195920)
  * Postgres exporter package was renamed
  * fix deprecation warnings

-------------------------------------------------------------------
Wed Feb 02 13:54:41 CET 2022 - jmassaguerpla@suse.de

- version 4.2.20-1
  * Handle multiple Kiwi bundles (bsc#1194905)
  * enforce correct minion configuration similar to bootstrapping
    (bsc#1192510)
  * Add state for changing proxy
  * Update proxy path on minion connection
  * Fix problem installing/removing packages using action chains
    in transactional systems

-------------------------------------------------------------------
Wed Nov 17 12:42:18 CET 2021 - jmassaguerpla@suse.de

- version 4.2.19-1
  * fix openscap scan with tailoring options (bsc#1192321)
  * Fix virt_utils module python 2.6 compatibility (bsc#1191123)
  * Implement using re-activation keys when bootstrapping
  * Add missing compressed_hash value from Kiwi inspect (bsc#1191702)
  * Don't create skeleton /srv/salt/top.sls
  * Run Prometheus JMX exporter as Java agent (bsc#1184617)
  * Replace FileNotFoundError by python2-compatible OSError (bsc#1191139)

-------------------------------------------------------------------
Fri Oct 22 15:55:30 CEST 2021 - jgonzalez@suse.com

- version 4.2.18-1
  * revert disable unaccessible local repos before bootstrapping (bsc#1186405)

-------------------------------------------------------------------
Wed Oct 06 09:40:53 CEST 2021 - jgonzalez@suse.com

- version 4.2.17-1
  * Fix cpuinfo grain and virt_utils state python2 compatibility
    (bsc#1191139, bsc#1191123)
  * deploy certificate on SLE Micro 5.1
  * Realign pkgset cookie path for Salt Bundle changes
  * Fix pkgset beacon to work with salt-minion 2016.11.10 (bsc#1189260)
  * Fix virt grain python2 compatibility
  * disable unaccessible local repos before bootstrapping (bsc#1186405)
  * Fix mgrcompat state module to work with Salt 3003 and 3004
  * Add 'flush_cache' flag to 'ansible.playbooks' call (bsc#1190405)
  * Update kernel live patch version on minion startup (bsc#1190276)
  * don't use libvirt API to get its version for the virt features grain
  * implement package locking for salt minions

-------------------------------------------------------------------
Mon Aug 30 12:05:54 CEST 2021 - jmassaguerpla@suse.de

- version 4.2.16-1
- Add Rocky Linux 8 support
- Enable logrotate configuration for Salt SSH minion logs
- Add UEFI support for VM creation
- Add virt-tuner templates to VM creation
- Handle more ocfs2 setups in virt_utils module
- Add missing symlinks to generate the "certs" state for
  SLE Micro 5.0 and openSUSE MicroOS minions (bsc#1188503)
- Add findutils to Kiwi bootstrap packages
- Remove systemid file on salt client cleanup
- Add support for Kiwi options
- Skip 'update-ca-certificates' run if the certs are updated automatically
- Use lscpu to provide more CPU grains for all architectures
- Fix deleting stopped virtual network (bsc#1186281)
- Handle virtual machines running on pacemaker cluster

-------------------------------------------------------------------
Fri Jul 16 14:36:09 CEST 2021 - jgonzalez@suse.com

- version 4.2.15-1
- Fix parameters for 'runplaybook' state (bsc#1188395)
- Fix Salt scap state to use new 'xccdf_eval' function
- fix product detection for native RHEL products (bsc#1187397)
- when bootstrapping with ssh-push with tunnel use the port number
  for fetching GPG keys from the server (bsc#1187441)

-------------------------------------------------------------------
Thu Jun 10 13:46:47 CEST 2021 - jgonzalez@suse.com

- version 4.2.14-1
- exclude openSUSE Leap 15.3 from product installation (bsc#1186858)
- Accept GPG key in Amazon Linux 2 for res7tools channel (bsc#1187102)

-------------------------------------------------------------------
Thu Jun 03 13:56:59 CEST 2021 - jgonzalez@suse.com

- version 4.2.13-1
- Enable certificate deployment for Leap 15.3 clients which is needed for
  bootstrapping (bsc#1186765)

-------------------------------------------------------------------
Tue Jun 01 17:36:54 CEST 2021 - jgonzalez@suse.com

- version 4.2.12-1
- Do not assume Amazon bootstrap repo on RHEL and AlmaLinux instances (bsc#1186703)

-------------------------------------------------------------------
Mon May 24 12:42:03 CEST 2021 - jgonzalez@suse.com

- version 4.2.11-1
- fix installation of gnupg on Debian 10
- Fix deleting stopped virtual network (bsc#1186281)
- Do not install python2-salt on Salt 3002.2 Docker build hosts (bsc#1185506)
- Add support for 'disable_local_repos' salt minion config parameter
  (bsc#1185568)

-------------------------------------------------------------------
Mon May 10 17:46:51 CEST 2021 - jgonzalez@suse.com

- version 4.2.10-1
- fix product detection while bootstrapping RedHat like products (bsc#1185846)

-------------------------------------------------------------------
Wed May 05 16:44:00 CEST 2021 - jgonzalez@suse.com

- version 4.2.9-1
- Always create systemid file to indicate minion is managed by Uyuni
- Switch from GPLv2 to Apache 2.0.
- Add support of salt bundle to pkgset notify beacon
- Add automatic cookie file selection for pkgset beacon
- Ansible integration: new SLS files to operate Ansible control node
- provide details when bootstrap query is missing 'status'
- add virtual network edit action

-------------------------------------------------------------------
Thu Apr 29 11:51:00 CEST 2021 - jgonzalez@suse.com

- version 4.2.8-1
- Do not assume AmazonLinux bootstrap repo for CentOS (bsc#1185421)

-------------------------------------------------------------------
Fri Apr 16 13:35:25 CEST 2021 - jgonzalez@suse.com

- version 4.2.7-1
- Fix insecure JMX configuration (bsc#1184617)
- Add support for notify beacon for Debian/Ubuntu systems
- Automatically start needed networks and storage pools when creating/starting a VM
- Avoid conflicts with running ioloop on mgr_events engine (bsc#1172711)
- Require new kiwi-systemdeps packages (bsc#1184271)
- keep salt-minion when it is installed to prevent update problems with
  dependent packages not available in the bootstrap repo (bsc#1183573)
- Add support for AlmaLinux 8
- Provide Custom Info as Pillar data
- Add support for Amazon Linux 2
- Add support for Alibaba Cloud Linux 2
- add allow vendor change option to patching via salt
- Prevent useless package list refresh actions on zypper minions (bsc#1183661)
- Skip removed product classes with satellite-sync
- add grain for virt module features
- add virtual network creation action

-------------------------------------------------------------------
Fri Mar 05 15:45:18 CET 2021 - jgonzalez@suse.com

- version 4.2.6-1
- handle GPG keys when bootstrapping ssh minions (bsc#1181847)

-------------------------------------------------------------------
Thu Feb 25 12:12:31 CET 2021 - jgonzalez@suse.com

- version 4.2.5-1
- Ubuntu 18 has version of apt which does not correctly support
  auth.conf.d directory. Detect the working version and use this feature
  only when we have a higher version installed

-------------------------------------------------------------------
Wed Jan 27 13:11:15 CET 2021 - jgonzalez@suse.com

- version 4.2.4-1
- fix apt login for similar channel labels (bsc#1180803)
- Change behavior of mgrcompat wrapper after deprecation changes on Salt 3002
- Remove the virtpoller beacon
- Make autoinstallation provisioning compatible with GRUB and ELILO
  in addition to GRUB2 only (bsc#1164227)

-------------------------------------------------------------------
Thu Dec 03 13:58:41 CET 2020 - jgonzalez@suse.com

- version 4.2.3-1
- Added RHEL support.

-------------------------------------------------------------------
Wed Nov 25 12:32:54 CET 2020 - jgonzalez@suse.com

- version 4.2.2-1
- Fix: sync before start action chains (bsc#1177336)
- Revert: Sync state modules when starting action chain execution (bsc#1177336)
- Sync state modules when starting action chain execution (bsc#1177336)
- Handle group- and org-specific image pillars
- Remove hostname from /var/lib/salt/.ssh/known_hosts when deleting system (bsc#1176159)
- Fix grub2 autoinstall kernel path (bsc#1178060)
- use require in reboot trigger (bsc#1177767)
- add pillar option to get allowVendorChange option during dist upgrade
- Change VM creation state to handle installation from kernel, PXE or CDROM
- Fix action chain resuming when patches updating salt-minion don't cause service to be
  restarted (bsc#1144447)
- Make grub2 autoinstall kernel path relative to the boot partition root (bsc#1175876)
- Fix: do not break when pod status is empty (bsc#1161903)
- Move channel token information from sources.list to auth.conf on Debian 10 and Ubuntu 18 and newer
- Add support for activation keys on server configuration Salt modules
- ensure the yum/dnf plugins are enabled

-------------------------------------------------------------------
Fri Sep 18 12:29:55 CEST 2020 - jgonzalez@suse.com

- version 4.2.1-1
- Add uyuni-config-modules subpackage with Salt modules to configure
  Servers
- Fix the dnf plugin to add the token to the HTTP header (bsc#1175724)
- Fix reporting of missing products in product.all_installed (bsc#1165829)
- Fix: supply a dnf base when dealing w/repos (bsc#1172504)
- Fix: autorefresh in repos is zypper-only
- Add virtual network state change state to handle start, stop and delete
- Add virtual network state change state to handle start and stop
- Update package version to 4.2.0

-------------------------------------------------------------------
Thu Jul 23 13:41:10 CEST 2020 - jgonzalez@suse.com

- version 4.1.12-1
- fetch oracle-release when looking for RedHat Product Info (bsc#1173584)
- Force a refresh after deleting a virtual storage volume
- Prevent stuck Hardware Refresh actions on Salt 2016.11.10 based SSH minions (bsc#1173169)
- Require PyYAML version >= 5.1
- Log out of Docker registries after image build (bsc#1165572)
- Prevent "module.run" deprecation warnings by using custom mgrcompat module

-------------------------------------------------------------------
Wed Jul 01 16:13:07 CEST 2020 - jgonzalez@suse.com

- version 4.1.11-1
- Fix detection of CentOS systems to properly set bootstrap repo (bsc#1173556)
- Do not produce syntax error on custom ssh_agent Salt module when
  executing on Python 2 instance.

-------------------------------------------------------------------
Tue Jun 23 17:24:45 CEST 2020 - jgonzalez@suse.com

- version 4.1.10-1
- Remove VM disk type attribute
- Merge virtualization fragment into suma-minion pillar (bsc#1172962)

-------------------------------------------------------------------
Wed Jun 17 16:21:24 CEST 2020 - jgonzalez@suse.com

- version 4.1.9-1
- Add ssh_agent for CaaSP management

-------------------------------------------------------------------
Wed Jun 10 12:41:08 CEST 2020 - jgonzalez@suse.com

- version 4.1.8-1
- Avoid SSL certificate issue when bootstrapping OpenSUSE Leap 15.2 (bsc#1172712)
- Add Salt states for CaaSP cluster management
- Use minion fqdn instead of minion id as target in kiwi_collect_image
  runner. If fqdn is not present or is localhost, use minion ip as
  fallback (bsc#1170737)
- trust customer gpg key when metadata signing is enabled
- specify gpg key for RH systems in repo file (bsc#1172286)
- Implement CaaSP cluster upgrade procedure in cluster provider module.
- handle GPG check flags different for yum/dnf (bsc#1171859)
- Enable bootstrapping for Oracle Linux 6, 7 and 8
- Set YAML loader to fix deprecation warnings

-------------------------------------------------------------------
Wed May 20 11:06:24 CEST 2020 - jgonzalez@suse.com

- version 4.1.7-1
- Fix failing "Hardware Refresh" actions because wrong "instance_id" reported
  from minion due a captive portal on the network (bsc#1171491)
- Remove suseRegisterInfo package only if it's plain client (bsc#1171262)
- On Debian-like systems, install only required dependencies when installing salt
- Enable support for bootstrapping Ubuntu 20.04 LTS
- Pass image profile custom info values as Docker buildargs during image build
- Cluster Awareness: Introduce generic SLS files for Cluster Management
  and CaaSP Cluster Provider custom Salt module.
- Add virtual volume delete action
- Ubuntu no longer shows removed packages as installed (bsc#1171461)

-------------------------------------------------------------------
Mon Apr 13 09:37:50 CEST 2020 - jgonzalez@suse.com

- version 4.1.6-1
- Fix virt.deleted state dependency
- Make 'product' state module only available for minions with zypper >= 1.8.13 (bsc#1166699)
- Use saltutil states if available on the minion (bsc#1167556)
- Enable support for bootstrapping Astra Linux CE "Orel"
- remove key grains only when file and grain exists (bsc#1167237)
- Add virtual storage pool actions

-------------------------------------------------------------------
Thu Mar 19 12:17:47 CET 2020 - jgonzalez@suse.com

- version 4.1.5-1
- Enable support for bootstrapping Debian 9 and 10
- Adapt 'mgractionchains' module to work with Salt 3000

-------------------------------------------------------------------
Wed Mar 11 11:03:06 CET 2020 - jgonzalez@suse.com

- version 4.1.4-1
- cleanup key grains after usage
- Disable modularity failsafe mechanism for RHEL 8 repos (bsc#1164875)
- install dmidecode before HW profile update when missing
- Add mgr_start_event_grains.sls to update minion config
- Add 'product' custom state module to handle installation of
  SUSE products at client side (bsc#1157447)
- Support reading of pillar data for minions from multiple files (bsc#1158754)

-------------------------------------------------------------------
Mon Feb 17 12:56:29 CET 2020 - jgonzalez@suse.com

- version 4.1.3-1
- Do not workaround util.syncmodules for SSH minions (bsc#1162609)
- Force to run util.synccustomall when triggering action chains on SSH minions (bsc#1162683).
- Adapt sls file for pre-downloading in Ubuntu minions
- Add custom 'is_payg_instance' grain when instance is PAYG and not BYOS.

-------------------------------------------------------------------
Wed Jan 22 12:25:10 CET 2020 - jgonzalez@suse.com

- version 4.1.2-1
- Only install python2-salt on buildhosts if it is available
- sort formulas by execution order (bsc#1083326)
- split remove_traditional_stack into two parts. One for all systems and
  another for clients not being a Uyuni Server or Proxy (bsc#1121640)
- Change the order to check the version correctly for RES (bsc#1152795)
- Remove the virt-poller cache when applying Virtualization entitlement
- Force HTTP request timeout on public cloud grain (bsc#1157975)

-------------------------------------------------------------------
Wed Nov 27 17:08:25 CET 2019 - jgonzalez@suse.com

- version 4.1.1-1
- dockerhost: install python2 salt packages only when python2
  is available (bsc#1129627)
- Support license entry in kiwi image packages list
- Install yum plugin only for yum < 4 (bsc#1156173)
- Add self monitoring to Admin Monitoring UI (bsc#1143638)
- configure GPG keys and SSL Certificates for RHEL8 and ES8
- Always run Kiwi with empty cache (bsc#1155899)
- Do not show errors when polling internal metadata API (bsc#1155794)
- Avoid traceback error due lazy loading which_bin (bsc#1155794)
- Add missing "public_cloud" custom grain (bsc#1155656)
- Consider timeout value in salt remote script (bsc#1153181)
- Using new module path for which_bin to get rid of DeprecationWarning
- Fix: match `image_id` with newer k8s (bsc#1149741)
- Bump version to 4.1.0 (bsc#1154940)
- Always install latest available salt during bootstrap
- Create Kiwi cache dir if not present
- Require pmtools only for SLE11 i586 and x86_64 (bsc#1150314)
- do not break Servers registering to a Server
- Introduce dnf-susemanager-plugin for RHEL8 minions
- Provide custom grain to report "instance id" when running on Public Cloud instances
- enable Kiwi NG on SLE15
- disable legacy startup events for new minions
- implement provisioning for salt clients
- Bootstrapping RES6/RHEL6/SLE11 with TLS1.2 now shows error message. (bsc#1147126)
- Fix for issue with bootstrapping RES minions (bsc#1147126)
- dmidecode does not exist on ppc64le and s390x (bsc#1145119)
- update susemanager.conf to use adler32 for computing the server_id for new minions

-------------------------------------------------------------------
Wed Jul 31 17:42:04 CEST 2019 - jgonzalez@suse.com

- version 4.0.13-1
- Check for result of image rsync transfer to catch failures early (bsc#1104949)
- Force VM off before deleting it (bsc#1138127)
- Allow forcing off or resetting VMs
- Fix the indentation so that custom formulas can be read correctly (bsc#1136937)
- Make sure dmidecode is installed during bootstrap to ensure that hardware
  refresh works for all operating systems (bsc#1137952)
- Prevent stuck Actions when onboarding KVM host minions (bsc#1137888)
- Fix formula name encoding on Python 3 (bsc#1137533)
- Adapt tests for SUSE manager 4.0
- More thorougly disable the Salt mine in util.mgr_mine_config_clean_up (bsc#1135075)

-------------------------------------------------------------------
Wed May 15 15:35:23 CEST 2019 - jgonzalez@suse.com

- version 4.0.12-1
- SPEC cleanup
- Enabling certificate deployment for Leap 15.1 clients which is
  needed for bootstrapping
- States to enable/disable server monitoring
- Improve salt events processing performance (bsc#1125097)

-------------------------------------------------------------------
Mon Apr 22 12:23:43 CEST 2019 - jgonzalez@suse.com

- version 4.0.11-1
- Enable SLES11 OS Image Build Host
- Add support for Salt batch execution mode
- Do not configure Salt Mine in newly registered minions (bsc#1122837)
- use default 'master' branch in OSImage profile URL (bsc#1108218)
- Add Python linting makefile and PyLint configuration file

-------------------------------------------------------------------
Thu Apr 04 14:43:04 CEST 2019 - jgonzalez@suse.com

- version 4.0.10-1
- Update get_kernel_live_version module to support older Salt versions (bsc#1131490)

-------------------------------------------------------------------
Fri Mar 29 10:37:42 CET 2019 - jgonzalez@suse.com

- version 4.0.9-1
- Update get_kernel_live_version module to support SLES 15 live patches
- Support register minion using bootstrap repos for 18.04 and 16.04.

-------------------------------------------------------------------
Mon Mar 25 17:04:34 CET 2019 - jgonzalez@suse.com

- version 4.0.8-1
- Fix Salt error related to remove_traditional_stack when bootstrapping an Ubuntu
  minion (bsc#1128724)
- Adapt disablelocalrepos.sls syntax for Salt 2016.10 (rhel6, sle11) (bsc#1127706)
- Automatically trust SUSE GPG key for client tools channels on Ubuntu systems
- util.systeminfo sls has been added to perform different actions at minion startup (bsc#1122381)

-------------------------------------------------------------------
Sat Mar 02 00:16:05 CET 2019 - jgonzalez@suse.com

- version 4.0.7-1
- Add support for Ubuntu minions
- Add Ubuntu SSL-Cert SLS-Files

-------------------------------------------------------------------
Wed Feb 27 13:17:30 CET 2019 - jgonzalez@suse.com

- version 4.0.6-1
- Fix mgr_events to use current ioloop (bsc#1126280)
- add states for virtual machine actions
- Added option to read 'pkg_download_point_...' pillar values and use it in repo url

-------------------------------------------------------------------
Thu Jan 31 09:45:42 CET 2019 - jgonzalez@suse.com

- version 4.0.5-1
- prevent the pkgset beacon from firing during onboarding (bsc#1122896)
- Prevent excessive DEBUG logging from mgr_events engine

-------------------------------------------------------------------
Wed Jan 16 12:27:07 CET 2019 - jgonzalez@suse.com

- version 4.0.4-1
- Allow bootstrapping minions with a pending minion key being present (bsc#1119727)

-------------------------------------------------------------------
Mon Dec 17 14:46:00 CET 2018 - jgonzalez@suse.com

- version 4.0.3-1
- enhance bootstrap-repo urls for Centos and Opensuse
- use a Salt engine to process return results (bsc#1099988)

-------------------------------------------------------------------
Fri Oct 26 10:52:53 CEST 2018 - jgonzalez@suse.com

- version 4.0.2-1
- deploy SSL certificate during onboarding of openSUSE Leap 15.0 (bsc#1112163)
- install all available known kiwi boot descriptions
- Fix: Cleanup Kiwi cache in highstate (bsc#1109892)
- removed the ssl certificate verification while checking bootstrap repo URL (bsc#1095220)
- Removed the need for curl to be present at bootstrap phase (bsc#1095220)
- Migrate Python code to be Python 2/3 compatible
- Fix merging of image pillars
- Fix: delete old custom OS images pillar before generation (bsc#1105107)
- Generate OS image pillars via Java
- Store activation key in the Kiwi built image
- Implement the 2-phase registration of saltbooted minions (SUMA for Retail)

-------------------------------------------------------------------
Fri Aug 10 15:45:45 CEST 2018 - jgonzalez@suse.com

- version 4.0.1-1
- Bump version to 4.0.0 (bsc#1104034)
- Fix copyright for the package specfile (bsc#1103696)
- Feat: add OS Image building with Kiwi FATE#322959 FATE#323057 FATE#323056
- Use custom Salt capabilities to prevent breaking backward compatibility (bsc#1096514)
- Update profileupdate.sls to report all versions installed (bsc#1089526)
- Do not install 'python-salt' on container build hosts with older Salt versions
  (bsc#1097699)
- Fix bootstrap error when removing traditional stack (bsc#1096009)

-------------------------------------------------------------------
Wed May 23 09:03:37 CEST 2018 - jgonzalez@suse.com

- version 3.2.13-1
- Changes to mgractionchains module in order to support action chains on
  minions using ssh-push connection method.
- Fix migration from traditional stack to salt registration (bsc#1093825)

-------------------------------------------------------------------
Wed May 16 17:38:30 CEST 2018 - jgonzalez@suse.com

- version 3.2.12-1
- Fix external pillar formula "ifempty" and "namespace" handling
- Fix profileupdate sls to execute retrieval of kernel live patching info (bsc#1091052)
- Use recursive merge on form pillars
- install python2/3 salt flavours on buildhosts to generate a compatible
  thin for the dockerimage beeing build (bsc#1092161)
- docker.login requires a list as input (bsc#1092161)

-------------------------------------------------------------------
Mon May 07 15:31:50 CEST 2018 - jgonzalez@suse.com

- version 3.2.11-1
- fix hardware refresh when FQDN changes (bsc#1073267)
- Handle empty values. Do not pass optional fields to pillar in
  formulas if field is empty and no ifempty attr defined.
- Fixed processing of formulas with $scope: group
- Preserve order of formulas (bsc#1083326)

-------------------------------------------------------------------
Wed Apr 25 12:13:25 CEST 2018 - jgonzalez@suse.com

- version 3.2.10-1
- create bootstrap repo only if it exist in the server (bsc#1087840)

-------------------------------------------------------------------
Mon Apr 23 09:26:09 CEST 2018 - jgonzalez@suse.com

- version 3.2.9-1
- Enqueue states applied from 'mgractionchains' to avoid failures when
  other states are already running at that time (bsc#1090502)

-------------------------------------------------------------------
Wed Apr 04 12:14:25 CEST 2018 - jgonzalez@suse.com

- version 3.2.8-1
- Fix 'mgractionchains.resume' output when nothing to resume (bsc#1087401)

-------------------------------------------------------------------
Thu Mar 29 01:28:50 CEST 2018 - jgonzalez@suse.com

- version 3.2.7-1
- Do not execute sumautil.get_kernel_live_version when inspecting an image

-------------------------------------------------------------------
Mon Mar 26 09:15:31 CEST 2018 - jgonzalez@suse.com

- version 3.2.6-1
- Provide new Salt module and Reactor to handle Action Chains on Minions
- use dockermod with new salt and user repository/tag option for build
- adapt names for gpg keys which have been changed
- perform docker login before building and inspecting images (bsc#1085635)

-------------------------------------------------------------------
Mon Mar 05 09:09:19 CET 2018 - jgonzalez@suse.com

- version 3.2.5-1
- support SLE15 product family

-------------------------------------------------------------------
Wed Feb 28 10:15:38 CET 2018 - jgonzalez@suse.com

- version 3.2.4-1
- Remove SUSE Manager repositories when deleting salt minions
  (bsc#1079847)
- Fix master tops merging when running salt>=2018

-------------------------------------------------------------------
Mon Feb 05 12:53:28 CET 2018 - jgonzalez@suse.com

- version 3.2.3-1
- Allow scheduling the change of software channel changes as an
  action. The previous channels remain accessible to the registered
  system until the action is executed.

-------------------------------------------------------------------
Fri Feb 02 13:06:31 CET 2018 - jgonzalez@suse.com

- version 3.2.2-1
- compare osmajorrelease in jinja always as integer

-------------------------------------------------------------------
Wed Jan 17 13:31:27 CET 2018 - jgonzalez@suse.com

- version 3.2.1-1
- addition of parameters to package manipulation states to improve
  SUSE Manager performance
- python3 compatibility fixes in modules and states
- Fix cleanup state error when deleting ssh-push minion (bsc#1070161)
- Fix image inspect when entrypoint is used by overwriting it
  (bsc#1070782)

-------------------------------------------------------------------
Tue Dec 12 12:05:09 CET 2017 - jgonzalez@suse.com

- version 3.1.13-1
- fix Salt version detection for patches (bsc#1072350)

-------------------------------------------------------------------
Wed Nov 29 10:15:59 CET 2017 - jgonzalez@suse.com

- version 3.1.12-1
- Fix cleanup state error when deleting ssh-push minion (bsc#1070161)

-------------------------------------------------------------------
Tue Nov 28 15:18:20 CET 2017 - jgonzalez@suse.com

- version 3.1.11-1
- Added state templates for deploying/comparing config channels for Salt
- Fix failing certs state for Tumbleweed (bsc#970630)
- Fix deprecated SLS files to avoid deprecation warnings during highstate (bsc#1041993)
- Support xccdf 1.2 namespace in openscap result file (bsc#1059319)
- ensure correct ordering of patches (bsc#1059801)
- fix create empty top.sls with no-op (bsc#1053038)
- Enabling certificate deployment for Leap 42.3 clients which is
  needed for bootstrapping
- fix Salt version detection for patches (bsc#1072350)

-------------------------------------------------------------------
Thu Sep 14 11:41:56 CEST 2017 - mc@suse.de

- version 3.1.10-1
- Kubernetes runner implementation
- addition of parameters to package manipulation states to improve
  SUSE Manager performance

-------------------------------------------------------------------
Fri Jul 21 12:02:24 CEST 2017 - mc@suse.de

- version 3.1.9-1
- disable gpgcheck for bootstrap repo to work with new libzypp (bsc#1049670)
- Remove spacewalk:* repos when removing traditional stack (bsc#1024267)
- susemanager-sls: fix certs state for Tumbleweed (bsc970630)
- susemanager-sls: fix certs state for Leap 42.2 (bsc970630)
- Make sumautil.get_kernel_live_version accept any kgr output 'active: NUM'
  where NUM > 0 (bsc#1044074)

-------------------------------------------------------------------
Mon Jun 19 16:37:53 CEST 2017 - mc@suse.de

- version 3.1.8-1
- Avoids formula leaking on pillar data (bsc#1044236)

-------------------------------------------------------------------
Mon May 29 15:53:51 CEST 2017 - mc@suse.de

- version 3.1.7-1
- fix yum plugin when installing patches on RHEL6 (bsc#1039294)
- Remove suseRegisterInfo in a separate yum transaction so that
  it's not called by yum plugin (bsc#1038732)
- Refactoring formulas in suma_minion external pillar (bsc#1033825)
- configure mime also during bootstrapping
- add missing file name attr to yum plugin state
- Encode formula to str (bsc#1033825)
- update yum on RedHat like systems
- update basic packages when bootstrapping with salt
- use include instead of state.apply channels to fix salt-ssh issue
  (bsc#1036268)

-------------------------------------------------------------------
Wed May 03 15:55:46 CEST 2017 - michele.bologna@suse.com

- version 3.1.6-1
- Targeting patches instead of packages for non Zypper patch installation
- add certificate state for CAASP
- add certificate state for SLES for SAP (bsc#1031659)

-------------------------------------------------------------------
Mon Apr 03 14:47:46 CEST 2017 - mc@suse.de

- version 3.1.5-1
- patch application pre-download
- pre-download packages scheduled for install

-------------------------------------------------------------------
Fri Mar 31 09:48:52 CEST 2017 - mc@suse.de

- version 3.1.4-1
- Fix mainframesysinfo module to use /proc/sysinfo on SLES11
  (bsc#1025758)
- take care that container and images are removed after inspect
- add name to Bootstrap repo
- Pre-create empty top.sls with no-op (bsc#1017754)
- create a random container name
- Fix pkgset beacon (bsc#1029350)
- set minion own key owner to bootstrap ssh_push_sudo_user
- runner to generate ssh key and execute cmd via proxies
- change ssh bootstrap state to generate and auth keys for
  salt-ssh push with tunnel

-------------------------------------------------------------------
Tue Mar 07 14:55:32 CET 2017 - mc@suse.de

- version 3.1.3-1
- add xccdf result xslt
- move move_minion_uploaded_files runner
- call docker inspect for additional data
- remove the container after inspecting it
- do not call image profile automatically after build
- Add state for image profileupdate
- add SUSE Manager prefix to state ids

-------------------------------------------------------------------
Tue Feb 07 15:12:30 CET 2017 - michele.bologna@suse.com

- version 3.1.2-1
- Configure mine.update to submit a job return event (bsc#1022735)
- Disable spacewalksd and spacewalk-update-status when switching to salt
  registration (bsc#1020902)
- Fix timezone handling for rpm installtime (bsc#1017078)
- Push build images into registry
- Configure a Docker build host
- Salt version update

-------------------------------------------------------------------
Wed Jan 11 16:57:58 CET 2017 - michele.bologna@suse.com

- version 3.1.1-1
- Version bump to 3.1

-------------------------------------------------------------------
Fri Dec 16 12:14:52 CET 2016 - michele.bologna@suse.com

- version 0.1.18-1
- Rename 'master' pillar to 'mgr_server'
- Add tunneling to salt-ssh support
- Provide SUMA static pillar data for unregistered minions (bsc#1015122)
- implement fetching kernel live version as module (FATE#319519)
- Removing '/usr/share/susemanager/pillar' path
- Retreiving SUMA static pillar data from ext_pillar (bsc1010674)
- Bugfix: Prevent salt-master ERROR messages if formulas files are missing
  (bsc#1009004)
- fallback to major os release version for cert names (bsc#1009749)

-------------------------------------------------------------------
Mon Nov 07 11:37:52 CET 2016 - michele.bologna@suse.com

- version 0.1.17-1
- Sync custom modules,grains,beacons always before pkg and hw profileupdate
  (bsc#1004725)
- Write distupgrade state for SP migration via salt
- New location of the salt-ssh key/cert pair. The previous location wasn't
  writable by the salt user

-------------------------------------------------------------------
Thu Oct 13 12:50:28 CEST 2016 - mc@suse.de

- version 0.1.16-1
- Only normalize lists (bsc#1004456)
- Call normalize() before add_scsi_info() (bsc#1004456)

-------------------------------------------------------------------
Thu Oct 06 14:51:43 CEST 2016 - mc@suse.de

- version 0.1.15-1
- Fixed bug with numbers in FormulaForm and improved ext_pillar script
- Added formula directories and formulas.sls to setup script
- External pillar script now also includes formula pillars
- Rename symlinks according to changed 'os' grain for Expanded Support
- Adding certs states for RHEL minion based on SLES-ES
- Rename udevdb scsi info json key
- Add support for mapping mainframe sysinfo
- Implement isX86() in jinja more correctly
- Initial support for querying and saving DMI info
- Add support for mapping the devices
- Actually handle incoming hardware details
- Initial version of the hardware.profileupdate sls
- Added pkgset beacon support in susemanager yum plugin
- trust also RES GPG key on all RedHat minions
- trust GPG keys for SUSE Manager Tools channel on RES
- configure bootstrap repository for RES
- Always enable salt-minion service while bootstrapping (bsc#990202)
- CentOS cert state symlinks and fixes
- states for installing certificate on redhat minions
- pkg.list_products only on Suse
- yum plugin to add jwt token as http header
- Generate SLE 12 bootstrap repo path correctly (bsc#994578)
- Merging top.sls files in base env (bsc#986770)
- Watch files instead of require

-------------------------------------------------------------------
Mon Jul 18 14:23:32 CEST 2016 - jrenner@suse.com

- version 0.1.14-1
- Initial version of the boostrap sls file
- update trust store when multiple certs in one file are available on SLE11
- update ca certificates only when they have changed
- assume no pillar data if the yml file for the minion does not exist
  (bsc#980354)
- Add distributable pkgset beacon for RPM database notifications

-------------------------------------------------------------------
Tue May 24 16:04:20 CEST 2016 - kwalter@suse.com

- version 0.1.13-1
- require refresh channels before pkg states (bsc#975424)
- use pillar and static states to install/remove packages (bsc#975424)

-------------------------------------------------------------------
Tue Apr 12 17:15:01 CEST 2016 - mc@suse.de

- version 0.1.12-1
- Add external pillar minion data resolver (bsc#974853)
- Add readme about ext_pillars
- remove pillar top.sls (bsc#974853)

-------------------------------------------------------------------
Wed Apr 06 08:46:20 CEST 2016 - mc@suse.de

- version 0.1.11-1
- generate include only if group_ids not empty
- use state names in custom_groups (bsc#973452)
- rename pillar group_id to group_ids
- Fix generating blank repositories because hitting salt file list cache
  (bsc#971004)
- package pillar/top.sls (bsc#973569)
- pre require coreutils to create the cert symlink in post (bsc#972160)
- disable local repositories on registration (bnc#971788)

-------------------------------------------------------------------
Mon Mar 21 17:38:33 CET 2016 - mc@suse.de

- version 0.1.10-1
- remove unused ext_pillar
- ignore missing .sls to include in certs/init.sls
- ignore packages_{machine_id}.sls if it's missing
- ignore missing pillar files at minion level
- ignore missing sls or pillars in custom_XXX/init.sls
  (bnc#970461, bnc#970316)
- Include minion custom_<machine_id>.sls only if it exists (#bnc970461)
- Ignore missing org custom state (#bnc970461)
- refactor in python (#bnc970316) (#bnc970461)

-------------------------------------------------------------------
Wed Mar 09 11:29:45 CET 2016 - mc@suse.de

- version 0.1.9-1
- include org and groups separately in top.sls
- refresh pillar on remove from group
- initial suma groups external pillar

-------------------------------------------------------------------
Wed Mar 02 12:09:13 CET 2016 - mc@suse.de

- version 0.1.8-1
- rename tables

-------------------------------------------------------------------
Tue Jan 26 14:07:41 CET 2016 - mc@suse.de

- version 0.1.7-1
- cleanup python code according to PR review
- reworked sumautil network utils to be more pythonic
- remove commented code
- get network if modules, checkstyle cleanup
- get minion primary ips

-------------------------------------------------------------------
Sat Jan 16 11:38:17 CET 2016 - mc@suse.de

- version 0.1.6-1
- custom grain for total num of cpus

-------------------------------------------------------------------
Thu Jan 14 13:30:59 CET 2016 - mc@suse.de

- version 0.1.5-1
- Port client python HW handling to server side java
- CPU socket count: try also lscpu and dmidecode

-------------------------------------------------------------------
Tue Jan 05 15:55:57 CET 2016 - mc@suse.de

- version 0.1.4-1
- Fill General and DMI hw info on minion registration

-------------------------------------------------------------------
Wed Dec 16 11:28:21 CET 2015 - mc@suse.de

- version 0.1.3-1
- Add static sls for package management

-------------------------------------------------------------------
Mon Nov 30 11:15:47 CET 2015 - mc@suse.de

- version 0.1.2-1
- force link creation
- use osfullname instead of os
- Cover sles12 machines reporing os grain SUSE
- Add support for deploying certificates to SLES11 minions

-------------------------------------------------------------------
Tue Nov 17 09:35:38 CET 2015 - jrenner@suse.com

- version 0.1.1-1
- Initial package release
  070701000000F4000081B400000000000000000000000163F87E300000194F000000000000000000000000000000000000002500000000susemanager-sls/susemanager-sls.spec  #
# spec file for package susemanager-sls
#
# Copyright (c) 2021 SUSE LLC
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
# upon. The license for this file, and modifications and additions to the
# file, is the same license as for the pristine package itself (unless the
# license for the pristine package is not an Open Source License, in which
# case the license is the MIT License). An "Open Source License" is a
# license that conforms to the Open Source Definition (Version 1.9)
# published by the Open Source Initiative.

# Please submit bugfixes or comments via https://bugs.opensuse.org/
#


%if 0%{?suse_version} > 1320 || 0%{?rhel}
# SLE15 builds on Python 3
%global build_py3   1
%endif

Name:           susemanager-sls
Version:        4.2.31
Release:        1
Summary:        Static Salt state files for SUSE Manager
License:        Apache-2.0 AND LGPL-2.1-only
Group:          Applications/Internet
Source:         %{name}-%{version}.tar.gz
Requires(pre):  coreutils
Requires(posttrans): spacewalk-admin
Requires:       susemanager-build-keys-web >= 12.0.1
%if 0%{?build_py3}
BuildRequires:  python3-mock
BuildRequires:  python3-pytest
BuildRequires:  python3-salt
# Different package names for SUSE and RHEL:
Requires:       (python3-PyYAML >= 5.1 or python3-pyyaml >= 5.1)
%else
BuildRequires:  python-mock
BuildRequires:  python-pytest
BuildRequires:  python-salt
Requires:       python-PyYAML >= 5.1
%endif
BuildRoot:      %{_tmppath}/%{name}-%{version}-build
BuildArch:      noarch

%description
Static Salt state files for SUSE Manager, where generic operations are
provided for the integration between infrastructure components.

%package -n uyuni-config-modules
Summary:        Salt modules to configure a Server
Group:          Applications/Internet

%description -n uyuni-config-modules
This package contains Salt execution and state modules that can be used
to configure a SUSE Manager or Uyuni Server.

%prep
%setup -q

%build

%install
mkdir -p %{buildroot}/usr/share/susemanager/salt/_grains
mkdir -p %{buildroot}/usr/share/susemanager/salt/_beacons
mkdir -p %{buildroot}/usr/share/susemanager/salt/_modules
mkdir -p %{buildroot}/usr/share/susemanager/salt/_states
mkdir -p %{buildroot}/usr/share/susemanager/salt-ssh
mkdir -p %{buildroot}/usr/share/susemanager/modules/pillar
mkdir -p %{buildroot}/usr/share/susemanager/modules/tops
mkdir -p %{buildroot}/usr/share/susemanager/modules/runners
mkdir -p %{buildroot}/usr/share/susemanager/modules/engines
mkdir -p %{buildroot}/usr/share/susemanager/modules/roster
mkdir -p %{buildroot}/usr/share/susemanager/pillar_data
mkdir -p %{buildroot}/usr/share/susemanager/formulas
mkdir -p %{buildroot}/usr/share/susemanager/formulas/metadata
mkdir -p %{buildroot}/usr/share/susemanager/reactor
mkdir -p %{buildroot}/usr/share/susemanager/scap
mkdir -p %{buildroot}/srv/formula_metadata
cp -R salt/* %{buildroot}/usr/share/susemanager/salt
cp -R salt-ssh/* %{buildroot}/usr/share/susemanager/salt-ssh
cp -R modules/pillar/* %{buildroot}/usr/share/susemanager/modules/pillar
cp -R modules/tops/* %{buildroot}/usr/share/susemanager/modules/tops
cp -R modules/runners/* %{buildroot}/usr/share/susemanager/modules/runners
cp -R modules/engines/* %{buildroot}/usr/share/susemanager/modules/engines
cp -R modules/roster/* %{buildroot}/usr/share/susemanager/modules/roster
cp -R pillar_data/* %{buildroot}/usr/share/susemanager/pillar_data
cp -R formulas/* %{buildroot}/usr/share/susemanager/formulas
cp -R formula_metadata/* %{buildroot}/srv/formula_metadata
cp -R reactor/* %{buildroot}/usr/share/susemanager/reactor
cp -R scap/* %{buildroot}/usr/share/susemanager/scap

# Manually install Python part to already prepared structure
cp src/beacons/pkgset.py %{buildroot}/usr/share/susemanager/salt/_beacons
cp src/grains/*.py %{buildroot}/usr/share/susemanager/salt/_grains/
rm %{buildroot}/usr/share/susemanager/salt/_grains/__init__.py
cp src/modules/*.py %{buildroot}/usr/share/susemanager/salt/_modules
rm %{buildroot}/usr/share/susemanager/salt/_modules/__init__.py
cp src/states/*.py %{buildroot}/usr/share/susemanager/salt/_states
rm %{buildroot}/usr/share/susemanager/salt/_states/__init__.py

# Install doc, examples
mkdir -p %{buildroot}/usr/share/doc/packages/uyuni-config-modules/examples/ldap
cp src/doc/* %{buildroot}/usr/share/doc/packages/uyuni-config-modules/
cp src/examples/uyuni_config_hardcode.sls %{buildroot}/usr/share/doc/packages/uyuni-config-modules/examples
cp src/examples/ldap/* %{buildroot}/usr/share/doc/packages/uyuni-config-modules/examples/ldap

%check
cd test
# Run py.test-3 for rhel
py.test%{?rhel:-3} test_pillar_suma_minion.py
cd ../src/tests
py.test%{?rhel:-3}

# Check that SLS files don't contain any call to "module.run" which has
# been replaced by "mgrcompat.module_run" calls.
! grep --include "*.sls" -r "module\.run" %{buildroot}/usr/share/susemanager/salt || exit 1

%post
# HACK! Create broken link when it will be replaces with the real file
ln -sf /srv/www/htdocs/pub/RHN-ORG-TRUSTED-SSL-CERT \
   /usr/share/susemanager/salt/certs/RHN-ORG-TRUSTED-SSL-CERT 2>&1 ||:

%posttrans
# Run JMX exporter as Java Agent (bsc#1184617)
grep -q 'prometheus_monitoring_enabled\s*=\s*1\s*$' /etc/rhn/rhn.conf
if [[ $? == 0 ]]; then
  /usr/sbin/mgr-monitoring-ctl enable
fi

%files
%defattr(-,root,root)
%dir /usr/share/susemanager
/usr/share/susemanager/salt
/usr/share/susemanager/salt-ssh
/usr/share/susemanager/pillar_data
/usr/share/susemanager/modules
/usr/share/susemanager/modules/pillar
/usr/share/susemanager/modules/tops
/usr/share/susemanager/modules/runners
/usr/share/susemanager/modules/engines
/usr/share/susemanager/modules/roster
/usr/share/susemanager/formulas
/usr/share/susemanager/reactor
/usr/share/susemanager/scap
/srv/formula_metadata
%exclude /usr/share/susemanager/salt/_modules/uyuni_config.py
%exclude /usr/share/susemanager/salt/_states/uyuni_config.py
%ghost /usr/share/susemanager/salt/certs/RHN-ORG-TRUSTED-SSL-CERT

%files -n uyuni-config-modules
%defattr(-,root,root)
%dir /usr/share/susemanager
/usr/share/susemanager/salt/_modules/uyuni_config.py
/usr/share/susemanager/salt/_states/uyuni_config.py
%dir /usr/share/doc/packages/uyuni-config-modules
%doc /usr/share/doc/packages/uyuni-config-modules/*
%doc /usr/share/doc/packages/uyuni-config-modules/examples/*
%doc /usr/share/doc/packages/uyuni-config-modules/examples/ldap/*

%changelog
 070701000000F5000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001500000000susemanager-sls/test  070701000000F6000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000001A00000000susemanager-sls/test/data 070701000000F7000081B400000000000000000000000163F87E30000000B6000000000000000000000000000000000000002D00000000susemanager-sls/test/data/formula_order.json  ["branch-network","cpu-mitigations","dhcpd","grafana","image-synchronize","locale","prometheus","prometheus-exporters","pxe","saltboot","tftpd","virtualization-host","vsftpd","bind"]  070701000000F8000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000002300000000susemanager-sls/test/data/formulas    070701000000F9000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000002C00000000susemanager-sls/test/data/formulas/metadata   070701000000FA000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000003100000000susemanager-sls/test/data/formulas/metadata/bind  070701000000FB000081B400000000000000000000000163F87E3000000A23000000000000000000000000000000000000003A00000000susemanager-sls/test/data/formulas/metadata/bind/form.yml bind:
  $type: hidden-group

  config:
    $type: group
    options:
      $type: edit-group
      $optional: True
      $prototype:
        $type: text
        $key:
          $type: text
          $name: Option
    include_forwarders:
          $type: boolean
          $default: false

  configured_zones:
    $type: edit-group
    $minItems: 1
    $itemName: Zone ${i}
    $prototype:
      $type: group
      $key:
        $type: text
        $name: Name
      type:
        $type:  select
        $values: ["master", "slave"]
        $default: master
      notify:
        $type: boolean
        $default: False

  available_zones:
    $type: edit-group
    $minItems: 1
    $itemName: Zone ${i}
    $prototype:
      $type: group
      $key:
        $type: text
        $name: Name
      file:
        $type: text
      soa:
        $name: SOA
        $type: group
        ns:
          $name: NS
          $type: text
          $placeholder: ns@zone
          $ifEmpty: ns
        contact:
          $type: text
          $placeholder: admin@domain
          $ifEmpty: root@localhost
        serial:
          $default:  auto
          $ifEmpty:  auto
        class:
          $default:  IN
        refresh:
          $default:  8600
          $type: number
        retry:
          $default:  900
          $type: number
        expiry:
          $default:  86000
          $type: number
        nxdomain:
          $name: NXDOMAIN
          $default:  500
          $type: number
        ttl:
          $name: TTL
          $default:  8600
          $type: number
      records:
        $type: group
        A: 
          $type: edit-group
          $optional: true
          $minItems: 0
          $prototype:
            $key:
              $type: text
              $name: Hostname
            $type: text
            $name: IP address
        NS:
          $name: NS
          $type: group
          $optional:  true
          '@':
             $type: edit-group
             $minItems: 0
             $prototype:
               $type: text
        CNAME:
          $name: CNAME
          $type: edit-group
          $optional:  true
          $minItems: 0
          $prototype:
            $key:
              $type: text
              $name: Alias
            $type: text
            $name: Hostname
      generate_reverse: 
        $type: group
        $optional:  true
        net:
          $name: Network
          $optional:  true
        for_zones:
          $type: edit-group
          $optional:  true
          $minItems: 0
          $prototype:
            $type: text
 070701000000FC000081B400000000000000000000000163F87E3000000069000000000000000000000000000000000000003E00000000susemanager-sls/test/data/formulas/metadata/bind/metadata.yml description:
  Settings for bind nameserver
group: general_system_configuration
after:
  - branch-network   070701000000FD000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000003B00000000susemanager-sls/test/data/formulas/metadata/branch-network    070701000000FE000081B400000000000000000000000163F87E300000093C000000000000000000000000000000000000004400000000susemanager-sls/test/data/formulas/metadata/branch-network/form.yml   branch_network:
  $type: hidden-group
  dedicated_NIC:
    $type: boolean
    $default: True

  nic:
    $default: eth1
    $visibleIf: .dedicated_NIC == true
  ip:
    $default: 192.168.128.1
    $visibleIf: .dedicated_NIC == true
  netmask:
    $default: 255.255.255.0
    $visibleIf: .dedicated_NIC == true

  configure_firewall:
    $type: boolean
    $default: true
    $help: Uncheck to configure firewall manually.

  firewall:
    $type: group
    $visibleIf: .configure_firewall == true
    enable_route:
      $type: boolean
      $default: True
      $visibleIf: ..dedicated_NIC == true
    enable_NAT:
      $type: boolean
      $default: True
      $visibleIf: ..dedicated_NIC == true
    enable_SLAAC_with_routing:
      $type: boolean
      $default: False
      $visibleIf: .enable_NAT == true
      $name: Force enable IPv6 SLAAC together with forwarding
      $help: Check to enable IPv6 autoconfiguration (SLAAC) even when Branch act as a router.
    open_dhcp_port:
      $visibleIf: ..dedicated_NIC == false
      $type: boolean
      $default: True
    open_dns_port:
      $visibleIf: ..dedicated_NIC == false
      $type: boolean
      $default: True
    open_tftp_port:
      $visibleIf: ..dedicated_NIC == false
      $type: boolean
      $default: True
    open_ftp_port:
      $visibleIf: ..dedicated_NIC == false
      $type: boolean
      $default: True
    open_http_port:
      $visibleIf: ..dedicated_NIC == false
      $type: boolean
      $default: True
    open_https_port:
      $visibleIf: ..dedicated_NIC == false
      $type: boolean
      $default: True
    open_salt_ports:
      $visibleIf: ..dedicated_NIC == false
      $type: boolean
      $default: True
    open_ssh_port:
      $type: boolean
      $default: True
    open_xmpp_server_port:
      $type: boolean
      $default: True
    open_xmpp_client_port:
      $type: boolean
      $default: True

  forwarder:
    $type: select
    $values:
      - resolver
      - bind
      - dnsmasq
    $default: bind

  forwarder_fallback:
    $type: boolean
    $default: True

  srv_directory:
    $name:  'server directory'
    $type: text
    $default: '/srv/saltboot'
  srv_directory_user:
    $name: 'server directory user'
    $type: text
    $default: 'saltboot'
  srv_directory_group:
    $name: 'server directory group'
    $type: text
    $default: 'saltboot'
070701000000FF000081B400000000000000000000000163F87E300000005C000000000000000000000000000000000000004800000000susemanager-sls/test/data/formulas/metadata/branch-network/metadata.yml   description:
  Configuration of Branch Server proxy networks
group: SUSE_manager_for_retail
07070100000100000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000003C00000000susemanager-sls/test/data/formulas/metadata/cpu-mitigations   07070100000101000081B400000000000000000000000163F87E30000000BA000000000000000000000000000000000000004500000000susemanager-sls/test/data/formulas/metadata/cpu-mitigations/form.yml  mitigations:
  $type: group

  name:
    $type: select
    $values: ["Auto",
              "Auto + No SMT",
              "Off",
              "Manual"
             ]
    $default: Auto
  07070100000102000081B400000000000000000000000163F87E3000000063000000000000000000000000000000000000004900000000susemanager-sls/test/data/formulas/metadata/cpu-mitigations/metadata.yml  description:
  Settings for kernel options for performance/security.
group: security_configuration
 07070100000103000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000003200000000susemanager-sls/test/data/formulas/metadata/dhcpd 07070100000104000081B400000000000000000000000163F87E3000001284000000000000000000000000000000000000003B00000000susemanager-sls/test/data/formulas/metadata/dhcpd/form.yml    dhcpd:
  $type: namespace
  domain_name:
    $placeholder: Enter domain name for managed LAN
  domain_name_servers:
    $type: edit-group
    $minItems: 1
    $prototype:
      $type: text
  listen_interfaces:
    $type: edit-group
    $minItems: 1
    $prototype:
      $type: text
    $help: List of interfaces to listen on
    $default:
    - eth1
  authoritative:
    $type: boolean
    $default: True
  max_lease_time:
    $default: 20001
    $type: number
  default_lease_time:
    $default: 20000
    $type: number
  subnets:
    $type: edit-group
    $minItems: 1
    $name: Network Configuration (subnet)
    $itemName: Network ${i}
    $prototype:
        $type: group
        $key:
          $type: text
          $name: Network IP
          $default: 192.168.1.0
        netmask:
          $type: text
          $default: 255.255.255.0
        domain_name:
          $type: text
          $optional: true
        comment:
          $type: text
          $optional: true
        range:
          $type: edit-group
          $name: Dynamic IP Range
          $minItems: 2
          $maxItems: 2
          $prototype:
            $type: text
          $default:
          - 192.168.1.51
          - 192.168.1.151
        broadcast_address:
          $type: text
          $default: 192.168.1.255
        routers:
          $type: edit-group
          $minItems: 1
          $prototype:
            $type: text
          $default:
          - 192.168.1.1
        next_server:
          $type: text
          $default: 192.168.1.1
          $help: IP address or hostname of the server from which the initial boot file (specified in the filename statement) is to be loaded
          $optional: true
        filename:
          $type: text
          $visibleIf: .next_server != ''
          $default: boot/pxelinux.0
          $help: Specify the name of the initial boot file which is to be loaded by a client
          $optional: true
        filename_efi:
          $type: text
          $visibleIf: .next_server != ''
          $default: boot/grub.efi
          $help: Specify the name of the initial boot file which is to be loaded by a client in EFI mode
          $optional: true
        hosts:
          $type: edit-group
          $minItems: 0
          $itemName: Host ${i}
          $name: Hosts with Static IP Addresses (with Defaults from Subnet)
          $optional: true
          $prototype:
            $key:
                $type: text
                $name: Hostname
            fixed_address:
                $type: text
                $optional: true
                $name: IP Address
            hardware:
                $type: text
                $name: Hardware Type and Address
                $placeholder: Enter hardware-type hardware-address (e.g. "ethernet AA:BB:CC:DD:EE:FF")
                $help: Hardware Identifier - ethernet prefix is mandatory
            next_server:
                $type: text
                $default:
                $help: IP address or hostname of the server from which the initial boot file (specified in the filename statement) is to be loaded
                $optional: true
            filename:
                $type: text
                $visibleIf: .next_server != ''
                $default:
                $help: Specify the name of the initial boot file which is to be loaded by a client
                $optional: true
            filename_efi:
                $type: text
                $visibleIf: .next_server != ''
                $default:
                $help: Specify the name of the initial boot file which is to be loaded by a client in EFI mode
                $optional: true
            comment:
                $type: text
  hosts:
    $type: edit-group
    $minItems: 0
    $itemName: Host ${i}
    $name: Hosts with static IP addresses (with global defaults)
    $optional: true
    $prototype:
      $key:
        $type: text
        $name: Hostname
      fixed_address:
        $type: text
        $optional: true
        $name: IP address
      hardware:
        $type: text
        $name: Hardware Type and Address
        $placeholder: Enter hardware-type hardware-address (e.g. "ethernet AA:BB:CC:DD:EE:FF")
        $help: Hardware Identifier - ethernet prefix is mandatory
      next_server:
        $type: text
        $default:
        $help: IP address or hostname of the server from which the initial boot file (specified in the filename statement) is to be loaded
        $optional: true
      filename:
        $type: text
        $visibleIf: .next_server != ''
        $default:
        $help: Specify the name of the initial boot file which is to be loaded by a client
        $optional: true
      comment:
        $type: text
07070100000105000081B400000000000000000000000163F87E3000000065000000000000000000000000000000000000003F00000000susemanager-sls/test/data/formulas/metadata/dhcpd/metadata.yml    description:
  Settings for DHCP server
group: general_system_configuration
after:
  - branch-network   07070100000106000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000003400000000susemanager-sls/test/data/formulas/metadata/grafana   07070100000107000081B400000000000000000000000163F87E300000074B000000000000000000000000000000000000003D00000000susemanager-sls/test/data/formulas/metadata/grafana/form.yml  grafana:
  $type: namespace

  enabled:
    $type: boolean
    $default: True
    $help: disasbled grafana

  admin_user:
    $type: text
    $name: Default admin user
    $required: true
    $disabled: "!formValues.grafana.enabled"
    
  admin_pass:
    $type: password
    $name: Default admin password  
    $required: true
    $disabled: "!formValues.grafana.enabled"

  datasources:
    $type: group
    $disabled: "!formValues.grafana.enabled"
    $help: Configure the data sources used by Grafana.

    prometheus:
      $type: edit-group
      $minItems: 1
      $name: Prometheus
      $help: Configure Prometheus data sources.
      $itemName: Prometheus data source ${i}
      $prototype:
        $type: group
        $disabled: "!formValues.grafana.enabled"
        $key:
          $type: text
          $name: Datasource name
          $default: Prometheus
          $help: Data source name
        url:
          $type: url
          $default: http://localhost:9080
          $required: true
          $name: Prometheus URL
          $help: URL of a Prometheus instance

  dashboards:
    $type: group
    $disabled: "!formValues.grafana.enabled"
    $help: Dashboards to install.

    add_uyuni_dashboard:
      $type: boolean
      $name: Uyuni server dashboard
      $help: Add dashboard for monitoring an Uyuni server
      $default: True

    add_uyuni_clients_dashboard:
      $type: boolean
      $name: Uyuni clients dashboard
      $help: Add dashboard for monitoring Uyuni clients
      $default: True

    add_postgresql_dasboard:
      $type: boolean
      $name: PostgreSQL dashboard
      $help: Add dashboard for monitoring a PostgreSQL database
      $default: True

    add_apache_dashboard:
      $type: boolean
      $name: Apache HTTPD dashboard
      $help: Add dashboard for monitoring an Apache HTTPD server
      $default: True
 07070100000108000081B400000000000000000000000163F87E300000003F000000000000000000000000000000000000004100000000susemanager-sls/test/data/formulas/metadata/grafana/metadata.yml  description:
  Enable and configure Grafana.
group: monitoring
 07070100000109000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000003E00000000susemanager-sls/test/data/formulas/metadata/image-synchronize 0707010000010A000081B400000000000000000000000163F87E3000000206000000000000000000000000000000000000004700000000susemanager-sls/test/data/formulas/metadata/image-synchronize/form.yml    image-synchronize:
    $type: hidden-group
    in_highstate:
        $name: Include Image Synchronization in Highstate
        $type: boolean
        $default: false

    whitelist:
        $type: edit-group
        $name: Synchronize only the listed images
        $minItems: 0
        $prototype:
            $type: text
            $help: Image name (without version)

    default_boot_image:
        $type: text
        $name: Default boot image
        $help: Default boot image used for first boot of a terminal
  0707010000010B000081B400000000000000000000000163F87E3000000051000000000000000000000000000000000000004B00000000susemanager-sls/test/data/formulas/metadata/image-synchronize/metadata.yml    description:
  Settings for image synchronization
group: SUSE_manager_for_retail
   0707010000010C000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000003300000000susemanager-sls/test/data/formulas/metadata/locale    0707010000010D000081B400000000000000000000000163F87E3000001537000000000000000000000000000000000000003C00000000susemanager-sls/test/data/formulas/metadata/locale/form.yml   # This file is part of locale-formula.
#
# Foobar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Foobar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Foobar.  If not, see <http://www.gnu.org/licenses/>.

timezone:
  $type: group

  name:
    $type: select
    $values: ["CET",
              "CST6CDT",
              "EET",
              "EST",
              "EST5EDT",
              "GMT",
              "GMT+0",
              "GMT-0",
              "GMT0",
              "Greenwich",
              "HST",
              "MET",
              "MST",
              "MST7MDT",
              "NZ",
              "NZ-CHAT",
              "Navajo",
              "PST8PDT",
              "UCT",
              "UTC",
              "Universal",
              "W-SU",
              "WET",
              "Zulu",
              "Etc/GMT+1",
              "Etc/GMT+2",
              "Etc/GMT+3",
              "Etc/GMT+4",
              "Etc/GMT+5",
              "Etc/GMT+6",
              "Etc/GMT+7",
              "Etc/GMT+8",
              "Etc/GMT+9",
              "Etc/GMT+10",
              "Etc/GMT+11",
              "Etc/GMT+12",
              "Etc/GMT-1",
              "Etc/GMT-2",
              "Etc/GMT-3",
              "Etc/GMT-4",
              "Etc/GMT-5",
              "Etc/GMT-6",
              "Etc/GMT-7",
              "Etc/GMT-8",
              "Etc/GMT-9",
              "Etc/GMT-10",
              "Etc/GMT-11",
              "Etc/GMT-12",
              "Etc/GMT-13",
              "Etc/GMT-14",
              "Etc/GMT",
              "Etc/GMT+0",
              "Etc/GMT-0",
              "Etc/GMT0",
              "Etc/Greenwich",
              "Etc/UCT",
              "Etc/UTC",
              "Etc/Universal",
              "Etc/Zulu" 
              ]
    $default: CET

  hardware_clock_set_to_utc:
    $type: boolean
    $default: True

keyboard_and_language:
  $type: group

  language:
    $type: select
    $values: ["Afrikaans",
              "Arabic",
              "Asturian",
              "Bulgarian",
              "Bengali",
              "Bosnian",
              "Catalan",
              "Czech",
              "Welsh",
              "Danish",
              "German",
              "Greek",
              "English (UK)",
              "English (US)",
              "Spanish",
              "Estonian",
              "Finnish",
              "French",
              "Galician",
              "Gujarati",
              "Hebrew",
              "Hindi",
              "Croatian",
              "Hungarian",
              "Indonesian",
              "Italian",
              "Japanese",
              "Georgian",
              "Khmer",
              "Korean",
              "Lithuanian",
              "Macedonian",
              "Marathi",
              "Norwegian",
              "Dutch",
              "Nynorsk",
              "Punjabi",
              "Polish",
              "Portuguese (Brazilian)",
              "Portuguese",
              "Romanian",
              "Russian",
              "Sinhala",
              "Slovak",
              "Slovenian",
              "Serbian",
              "Swedish",
              "Tamil",
              "Tajik",
              "Thai",
              "Turkish",
              "Ukrainian",
              "Vietnamese",
              "Walloon",
              "Xhosa",
              "Simplified Chinese",
              "Traditional Chinese",
              "Zulu"
              ]
    $default: English (US)

  keyboard_layout:
    $type: select
    $values: ["Arabic",
              "Belgian",
              "Canadian (Multilingual)",
              "Croatian",
              "Czech",
              "Czech (qwerty)",
              "Danish",
              "Dutch",
              "Dvorak",
              "English (UK)",
              "English (US)",
              "Estonian",
              "Finnish",
              "French",
              "French (Canada)",
              "French (Switzerland)",
              "German",
              "German (Switzerland)",
              "German (with deadkeys)",
              "Greek",
              "Hungarian",
              "Icelandic",
              "Italian",
              "Japanese",
              "Khmer",
              "Korean",
              "Lithuanian",
              "Norwegian",
              "Polish",
              "Portuguese",
              "Portuguese (Brazil)",
              "Portuguese (Brazil  US accents)",
              "Romanian",
              "Russian",
              "Serbian",
              "Simplified Chinese",
              "Slovak",
              "Slovak (qwerty)",
              "Slovene",
              "Spanish",
              "Spanish (Asturian variant)",
              "Spanish (CP 850)",
              "Spanish (Latin America)",
              "Swedish",
              "Tajik",
              "Traditional Chinese",
              "Turkish",
              "Ukrainian",
              "US International" 
              ]
    $default: English (US)
 0707010000010E000081B400000000000000000000000163F87E3000000071000000000000000000000000000000000000004000000000susemanager-sls/test/data/formulas/metadata/locale/metadata.yml   description:
  Settings for language, keyboard, and timezone
group: general_system_configuration
after:
  - users   0707010000010F000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000004100000000susemanager-sls/test/data/formulas/metadata/prometheus-exporters  07070100000110000081B400000000000000000000000163F87E30000003FF000000000000000000000000000000000000004A00000000susemanager-sls/test/data/formulas/metadata/prometheus-exporters/form.yml node_exporter:
  $type: group
  $help: Prometheus exporter for hardware and OS metrics.

  enabled:
    $type: boolean
    $default: True

  args:
    $name: "Arguments"
    $type: text
    $default: --web.listen-address=":9100"
    $help: Please refer to the documentation for available options.

apache_exporter:
  $type: group
  $help: Prometheus exporter for apache mod_status statistics.

  enabled:
    $type: boolean
    $default: False

  args:
    $name: "Arguments"
    $type: text
    $default: --telemetry.address=":9117"
    $help: Please refer to the documentation for available options.

postgres_exporter:
  $type: group
  $help: Prometheus exporter for PostgreSQL server metrics.

  enabled:
    $type: boolean
    $default: False

  data_source_name:
    $type: text
    $default: postgresql://user:passwd@localhost:5432/database?sslmode=disable

  args:
    $name: "Arguments"
    $type: text
    $default: --web.listen-address=":9187"
    $help: Please refer to the documentation for available options.
 07070100000111000081B400000000000000000000000163F87E3000000061000000000000000000000000000000000000004E00000000susemanager-sls/test/data/formulas/metadata/prometheus-exporters/metadata.yml description:
  Enable and configure Prometheus exporters for managed systems.
group: monitoring

   07070100000112000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000003700000000susemanager-sls/test/data/formulas/metadata/prometheus    07070100000113000081B400000000000000000000000163F87E300000093C000000000000000000000000000000000000004000000000susemanager-sls/test/data/formulas/metadata/prometheus/form.yml   prometheus:
  $type: namespace

  enabled:
    $type: boolean
    $default: True

  scrape_interval:
    $type: number
    $name: Scrape interval (s)
    $default: 15
    $disabled: "!prometheus.enabled"
    $required: true

  evaluation_interval:
    $type: number
    $name: Evaluation interval (s)
    $default: 15
    $disabled: "!prometheus.enabled"
    $required: true

  mgr:
    $type: group
    $name: Uyuni Server
    $disabled: "!prometheus.enabled"

    monitor_server:
      $name: Monitor this server
      $type: boolean
      $default: True

    autodiscover_clients:
      $name: Autodiscover clients 
      $type: boolean
      $default: True

    sd_username:
      $type: text
      $name: Username
      $help: Username for auto-discovering clients
      $default: admin
      $visibleIf: .autodiscover_clients == true
      $required: true

    sd_password:
      $type: password
      $name: Password
      $help: Password for auto-discovering clients
      $visibleIf: .autodiscover_clients == true
      $required: true

  alerting:
    $type: group
    $disabled: "!prometheus.enabled"

    alertmanager_service:
      $type: boolean
      $default: True
      $name: Enable local Alertmanager service

    use_local_alertmanager:
      $type: boolean
      $name: Use local Alertmanager
      $help: Use local Alertmanager for this Prometheus instance
      $visibleIf: .alertmanager_service == true
      $default: True

    alertmanagers:
      $type: edit-group
      $minItems: 0
      $itemName: Target ${i}
      $prototype:
        $type: group 
        $key:
          $type: text 
          $name: "IP Address : Port"
          $default: localhost:9093
          $match: ".*\\:\\d{1,5}"

    rule_files:
      $type: edit-group
      $minItems: 0
      $prototype:
        $type: text
        $default: /etc/prometheus/my-rules.yml
        $required: true

  scrape_configs:
    $type: edit-group
    $name: User defined scrape configurations
    $minItems: 0
    $itemName: File-based service discovery ${i}
    $disabled: "!prometheus.enabled"
    $prototype:
      $type: group 
      $key:
        $type: text 
        $name: "Job name"
      files:
        $type: edit-group
        $minItems: 1
        $prototype:
          $type: text
          $default: /etc/prometheus/my-scrape-config.yml
          $required: true


07070100000114000081B400000000000000000000000163F87E3000000042000000000000000000000000000000000000004400000000susemanager-sls/test/data/formulas/metadata/prometheus/metadata.yml   description:
  Enable and configure Prometheus
group: monitoring

  07070100000115000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000003000000000susemanager-sls/test/data/formulas/metadata/pxe   07070100000116000081B400000000000000000000000163F87E30000002B4000000000000000000000000000000000000003900000000susemanager-sls/test/data/formulas/metadata/pxe/form.yml  pxe:
  $type: hidden-group

  kernel_name:
     $name: 'Kernel Filename'
     $type: text
     $default: 'linux'

  initrd_name:
     $name: 'Initrd Filename'
     $type: text
     $default: 'initrd.gz'

  default_kernel_parameters:
     $name: 'Kernel Command Line Parameters'
     $type: text
     $default: 'panic=60 ramdisk_size=710000 ramdisk_blocksize=4096 vga=0x317 splash=silent kiwidebug=0'

  pxe_root_directory:
     $name:  'PXE Root Directory'
     $type: text
     $default: '/srv/saltboot'

  branch_id:
     $name: 'Branch Id'
     $type: text
     $placeholder: 'Enter unique Branch server ID (e.g. "B0001")'
     $help: 'Branch server ID is used as a prefix in terminal ID'
07070100000117000081B400000000000000000000000163F87E3000000067000000000000000000000000000000000000003D00000000susemanager-sls/test/data/formulas/metadata/pxe/metadata.yml  description:
  PXE settings for branch server
group: SUSE_manager_for_retail
after:
  - branch-network
 07070100000118000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000003500000000susemanager-sls/test/data/formulas/metadata/saltboot  07070100000119000081B400000000000000000000000163F87E300000157C000000000000000000000000000000000000003E00000000susemanager-sls/test/data/formulas/metadata/saltboot/form.yml partitioning:
    $name: Disk Partitioning
    $type: edit-group
    $itemName: Disk ${i}
    $minItems: 1
    $prototype:
        $type: group
        $key:
            $type: text
            $name: Disk Symbolic ID
            $placeholder: Enter disk symbolic ID (e.g. disk1, disk2, md0 for RAID devices)
            $help: Disk Symbolic ID is used together with Partition Symbolic ID for RAID completion.
        type:
            $type: select
            $name: Device Type
            $values:
              - RAID
              - DISK
            $default: DISK
        device:
            $type: text
            $visibleIf: .type == DISK
            $name: Disk Device
            $placeholder: Enter target disk device name (e.g. /dev/sda)
            $optional: true
        level:
            $visibleIf: .type == RAID
            $type: select
            $name: RAID Level
            $values:
              -
              - 0
              - 1
              - 4
              - 5
              - 6
              - 10
              - linear
              - multipath
            $default:
            $optional: true
        devices:
            $visibleIf: .type == RAID
            $type: edit-group
            $name: Symbolic IDs of devices to used for RAID device type
            $minItems: 0
            $prototype:
                $type: text
                $help: E.g. disk1p1, disk2p1, ... Combination of Disk symbolic ID and Partition symbolic ID to describe devices/partitions used to build RAID device.
                $placeholder: Enter combination of Disk and Partition symbolic ID (e.g. disk1part1, disk2part1, ...)
            $optional: True
        disklabel:
            $type: select
            $name: Partition table type
            $values:
              - gpt
              - msdos
              - none
        partitions:
            $type: edit-group
            $itemName: Partition ${i}
            $minItems: 1
            $optional: True
            $visibleIf: .disklabel != "none"
            $prototype:
                $type: group
                $key:
                    $type: text
                    $name: Partition Symbolic ID
                    $help: E.g. p1, p2, ... Together with Disk symbolic ID is used for RAID completion.
                    $placeholder: Enter partition symbolic ID (e.g. part1, part2, ...)
                size_MiB:
                    $type: number
                    $name: Partition Size (MiB)
                    $help: Leave blank to acquire remaining empty space on the disk.
                    $optional: True
                mountpoint:
                    $type: text
                    $name: Device Mount Point
                    $help: What should the partition be mount as - /, swap, /var, ...
                    $optional: True
                format:
                    $type: select
                    $name: Filesystem Format
                    $values:
                      -
                      - btrfs
                      - ext4
                      - xfs
                      - vfat
                      - swap
                    $optional: True
                image:
                    $type: text
                    $name: OS Image to Deploy
                    $help: Name of the OS Image. Leave blank if no image should be deployed on this partition.
                    $optional: True
                image_version:
                    $visibleIf: .image != ''
                    $type: text
                    $help: Version of OS Image. Leave blank for most recent.
                    $optional: True
                luks_pass:
                    $optional: True
                    $type: text
                    $name: Partition Encryption Password
                    $help: Password for encrypted partition. Leave blank for unencrypted. Image itself still can be encrypted.
                flags:
                    $type: select
                    $name: Partition Flags
                    $values:
                      -
                      - swap
                      - raid
                      - bios_grub
                      - esp
                      - boot
                    $default:
        mountpoint:
            $type: text
            $name: Device Mount Point
            $help: What should the partition be mount as - /, swap, /var, ...
            $optional: True
            $visibleIf: .disklabel == "none"
        format:
            $type: select
            $name: Filesystem Format
            $visibleIf: .disklabel == "none"
            $values:
              -
              - btrfs
              - ext4
              - xfs
              - vfat
              - swap
            $optional: True
        image:
            $visibleIf: .disklabel == "none"
            $type: text
            $name: OS Image to Deploy
            $help: Name of the OS Image. Leave blank if no image should be deployed on this partition.
            $optional: True
        image_version:
            $visibleIf: .image != ''
            $type: text
            $help: Version of OS Image. Leave blank for most recent.
            $optional: True
        luks_pass:
            $visibleIf: .disklabel == "none"
            $optional: True
            $type: text
            $name: Partition Encryption Password
            $help: Password for encrypted partition. Leave blank for unencrypted. Image itself still can be encrypted.
0707010000011A000081B400000000000000000000000163F87E300000005B000000000000000000000000000000000000004200000000susemanager-sls/test/data/formulas/metadata/saltboot/metadata.yml description:
  Control deployment and boot of POS terminals
group: SUSE_manager_for_retail
 0707010000011B000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000003200000000susemanager-sls/test/data/formulas/metadata/tftpd 0707010000011C000081B400000000000000000000000163F87E3000000137000000000000000000000000000000000000003B00000000susemanager-sls/test/data/formulas/metadata/tftpd/form.yml    tftpd:
  $type: hidden-group

  listen_ip:
     $name: 'Internal Network Address'
     $type: text
     $optional: True

  root_dir:
     $name: 'TFTP base directory'
     $type: text
     $default: '/srv/tftpboot'

  tftpd_user:
     $name: 'run TFTP under user'
     $type: text
     $default: 'tftp'

      
 0707010000011D000081B400000000000000000000000163F87E3000000068000000000000000000000000000000000000003F00000000susemanager-sls/test/data/formulas/metadata/tftpd/metadata.yml    description:
  Settings for tftpd service
group: general_system_configuration
after:
  - branch-network
0707010000011E000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000004000000000susemanager-sls/test/data/formulas/metadata/virtualization-host   0707010000011F000081B400000000000000000000000163F87E300000005F000000000000000000000000000000000000004900000000susemanager-sls/test/data/formulas/metadata/virtualization-host/form.yml  hypervisor:
  $type: select
  $values: ["KVM",
            "Xen"
            ]
  $default: KVM
 07070100000120000081B400000000000000000000000163F87E3000000055000000000000000000000000000000000000004D00000000susemanager-sls/test/data/formulas/metadata/virtualization-host/metadata.yml  description:
  Settings for virtualization host.
group: general_system_configuration
   07070100000121000041FD00000000000000000000000163F87E3000000000000000000000000000000000000000000000003300000000susemanager-sls/test/data/formulas/metadata/vsftpd    07070100000122000081B400000000000000000000000163F87E3000000604000000000000000000000000000000000000003C00000000susemanager-sls/test/data/formulas/metadata/vsftpd/form.yml   vsftpd_config:
  $type: hidden-group

  anon_root:
     $name: 'FTP server directory'
     $type: text
     $default: '/srv/ftp'

     
  listen_address:
     $name: 'Internal Network Address'
     $type: text
     $optional: True

  ssl_enable:
     $name:  'Enable ssl'
     $type: boolean
     $default: false
     
  secure_chroot_dir: 
     $name:  'Chroot dir'
     $type: text
     $default: '/usr/share/empty'

  anonymous_enable:
     $name:  'Allow anonymous FTP'
     $type: boolean
     $default: true

  allow_anon_ssl:
     $name:  'Allow SSL for anonymous'
     $type: boolean
     $default: true

  listen:
     $name:  'Run standalone'
     $type: boolean
     $default: true

  local_enable:
     $name:  'Allow local users'
     $type: boolean
     $default: true

  dirmessage_enable:
     $name:  'Activate directory messages'
     $type: boolean
     $default: true

  use_localtime: 
     $name:  'Use localtime'
     $type: boolean
     $default: true

  xferlog_enable: 
     $name:  'Activate logging of transfers'
     $type: boolean
     $default: true

  connect_from_port_20: 
     $name:  'Connect from port 20'
     $type: boolean
     $default: true

  pam_service_name: 
     $name:  'PAM service name'
     $type: text
     $default: 'vsftpd'

  rsa_cert_file:
     $name:  'RSA certificate file'
     $type: text
     $default: '/etc/ssl/certs/[ssl-cert-file].pem'

  rsa_private_key_file:
     $name:  'RSA private key file'
     $type: text
     $default: '/etc/ssl/private/[ssl-cert-file].key'

      07070100000123000081B400000000000000000000000163F87E3000000071000000000000000000000000000000000000004000000000susemanager-sls/test/data/formulas/metadata/vsftpd/metadata.yml   description:
  Settings for vsftpd for branchserver
group: general_system_configuration
after:
  - branch-network   07070100000124000081B400000000000000000000000163F87E3000000018000000000000000000000000000000000000002E00000000susemanager-sls/test/data/group_formulas.json {"9":["locale","tftpd"]}07070100000125000081B400000000000000000000000163F87E3000000077000000000000000000000000000000000000002F00000000susemanager-sls/test/data/minion_formulas.json    {"suma-refhead-min-centos7.mgr.suse.de":["branch-network"],"suma-refhead-min-sles12sp4.mgr.suse.de":["branch-network"]} 07070100000126000081B400000000000000000000000163F87E3000001F64000000000000000000000000000000000000002400000000susemanager-sls/test/test_engine.py   import logging
import re
import shlex
import subprocess

import psycopg2
import pytest
from mock import MagicMock, patch, call
from sqlalchemy import create_engine
from sqlalchemy_utils import database_exists, create_database, drop_database

from mgr_events import Responder, DEFAULT_COMMIT_BURST


# Route DEBUG-level output of the mgr_events logger to stderr so the
# engine's own log messages are visible while the tests run.
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)

logger = logging.getLogger('mgr_events')
logger.setLevel(logging.DEBUG)
logger.addHandler(console_handler)


@pytest.fixture(scope="session")
def postgres(request):
    """Start a local PostgreSQL server once for the whole test session.

    Runs ``pg_ctl start`` as the ``postgres`` system user and registers a
    finalizer that stops the server when the session ends.  Assumes the
    suite runs as root (e.g. inside the project's test container) so that
    ``su postgres`` works without a password prompt.
    """
    # pg_ctl daemonizes the server, so this child process exits quickly.
    proc = subprocess.Popen(shlex.split("su postgres -c \"pg_ctl -D ~/data -l ~/logfile start\""))
    def finalizer():
        # NOTE(review): the stop command names /var/lib/pgsql/data while the
        # start command uses ~/data -- these only match if the postgres
        # user's home is /var/lib/pgsql; confirm against the test image.
        subprocess.Popen(shlex.split("su postgres -c \"pg_ctl stop -D /var/lib/pgsql/data\""))
    request.addfinalizer(finalizer)
    # Block (up to 15s) until pg_ctl finishes, so the DB is up before tests.
    outs, errs = proc.communicate(timeout=15)
    # Mixing addfinalizer with a yield fixture is redundant but harmless;
    # teardown happens through the finalizer registered above.
    yield proc


@pytest.fixture(scope="session")
def db_engine(postgres):
    """Session-scoped SQLAlchemy engine pointing at the throwaway ``test`` DB."""
    url = "postgresql://postgres@/test"
    return create_engine(url)


@pytest.fixture
def db_connection(db_engine):
    """Yield a psycopg2 connection to a freshly (re)created ``test`` database.

    The database is created on demand before the test and dropped again in
    teardown, so every test starts from a clean slate.
    """
    if not database_exists(db_engine.url):
        create_database(db_engine.url)
    # NOTE: psycopg2's ``with connection`` commits/rolls back on exit but
    # does NOT close the connection object itself.
    with psycopg2.connect(user='postgres', host="localhost", dbname="test") as connection:
        yield connection
    # Teardown: runs after the test resumes the generator.
    drop_database(db_engine.url)


def new_connection():
    """Open and return a fresh psycopg2 connection to the local ``test`` DB."""
    return psycopg2.connect(dbname="test", host="localhost", user="postgres")


@pytest.fixture
def create_tables(db_connection):
    """Create the suseSaltEvent table that the Responder under test writes to."""
    ddl = """CREATE TABLE suseSaltEvent (
        id SERIAL PRIMARY KEY,
        minion_id CHARACTER VARYING(256),
        data TEXT NOT NULL,
        queue NUMERIC NOT NULL
    );"""
    cursor = db_connection.cursor()
    cursor.execute(ddl)
    db_connection.commit()


def delete_table(conn, table):
    """Remove all rows from ``table`` and commit the deletion.

    The table name is interpolated into the statement as an identifier,
    which DB-API placeholders cannot parameterize; restrict it to a plain
    SQL identifier so a malformed name cannot inject extra SQL.

    :param conn: an open DB-API connection
    :param table: name of the table to empty (simple identifier only)
    :raises ValueError: if ``table`` is not a plain SQL identifier
    """
    if not re.fullmatch(r"[A-Za-z_][A-Za-z0-9_]*", table):
        raise ValueError("invalid table name: %r" % table)
    conn.cursor().execute("DELETE FROM %s" % table)
    conn.commit()


@pytest.fixture
def responder(db_connection, create_tables):
    """Build an mgr_events Responder wired to the test database.

    ``mgr_events.psycopg2`` is patched only while the constructor runs, so
    the Responder picks up ``db_connection`` instead of opening its own
    connection; once the fixture returns, the patch is undone.
    """
    with patch('mgr_events.psycopg2') as mock_psycopg2:
        mock_psycopg2.connect.return_value = db_connection
        return Responder(
            MagicMock(),  # mock event_bus
            {
                'postgres_db': {
                     # NOTE(review): 'tests' differs from the actual DB name
                     # 'test'; it is never used to connect here because
                     # connect() is mocked -- confirm this is intended.
                     'dbname': 'tests',
                     'user': 'postgres',
                     'password': '',
                     'host': 'localhost',
                     'notify_channel': 'suseSaltEvent'
                 },
                'events': {
                    'thread_pool_size': 3
                }
            }
        )


def test_connection_recovery_on_insert(db_connection, responder):
    """An insert on a dead connection must reconnect without losing events."""
    throwaway = new_connection()
    responder.connection = throwaway
    responder._insert('salt/minion/1/start', {'value': 1})
    # Kill the live connection, then insert again while a replacement is
    # reachable through the patched connect().
    responder.connection.close()
    with patch('mgr_events.psycopg2') as psycopg2_mock:
        psycopg2_mock.connect.return_value = db_connection
        responder._insert('salt/minion/2/start', {'value': 2})
    responder.connection.commit()
    responder.cursor.execute("SELECT * FROM suseSaltEvent")
    rows = responder.cursor.fetchall()
    assert len(rows) == 2


def test_connection_recovery_on_commit(db_connection, responder):
    """A commit on a dead connection must reconnect without dropping data."""
    responder.connection = new_connection()
    responder._insert('salt/minion/1/start', {'value': 1})
    responder.connection.close()
    # The recovery path acquires a new connection via psycopg2.connect.
    with patch('mgr_events.psycopg2') as psycopg2_mock:
        psycopg2_mock.connect.return_value = db_connection
        responder.attempt_commit()
    responder.connection.commit()
    responder.cursor.execute("SELECT * FROM suseSaltEvent")
    rows = responder.cursor.fetchall()
    assert len(rows) == 1


def test_insert_start_event(responder, db_connection):
    """A minion start event lands in suseSaltEvent and costs one token."""
    event = ('salt/minion/12345/start', {'value': 1})
    responder.event_bus.unpack.return_value = event
    responder.add_event_to_queue('')
    responder.cursor.execute("SELECT * FROM suseSaltEvent;")
    assert responder.cursor.fetchall()
    assert responder.tokens == DEFAULT_COMMIT_BURST - 1


def test_insert_job_return_event(responder):
    """A job return event lands in suseSaltEvent and costs one token."""
    event = ('salt/job/12345/ret/6789', {'value': 1})
    responder.event_bus.unpack.return_value = event
    responder.add_event_to_queue('')
    responder.cursor.execute("SELECT * FROM suseSaltEvent;")
    assert responder.cursor.fetchall()
    assert responder.tokens == DEFAULT_COMMIT_BURST - 1

def test_insert_batch_start_event(responder):
    """A batch start event lands in suseSaltEvent and costs one token."""
    event = ('salt/batch/12345/start', {'value': 1})
    responder.event_bus.unpack.return_value = event
    responder.add_event_to_queue('')
    responder.cursor.execute("SELECT * FROM suseSaltEvent;")
    assert responder.cursor.fetchall()
    assert responder.tokens == DEFAULT_COMMIT_BURST - 1

def test_discard_batch_presence_ping_event(responder):
    """Batch-mode test.ping returns are presence pings and must be dropped."""
    payload = {'value': 1, 'fun': 'test.ping', 'metadata': {'batch-mode': True}}
    responder.event_bus.unpack.return_value = ('salt/job/12345/ret/6789', payload)
    responder.add_event_to_queue('')
    responder.cursor.execute("SELECT * FROM suseSaltEvent;")
    assert len(responder.cursor.fetchall()) == 0


def test_keep_presence_ping_event_without_batch(responder):
    """A plain test.ping return (no batch metadata) must be stored."""
    payload = {'value': 1, 'fun': 'test.ping', 'id': 'testminion'}
    responder.event_bus.unpack.return_value = ('salt/job/12345/ret/6789', payload)
    responder.add_event_to_queue('')
    responder.cursor.execute("SELECT * FROM suseSaltEvent;")
    assert len(responder.cursor.fetchall()) == 1


def test_commit_scheduled_on_init(responder):
    """Constructing the Responder schedules exactly one delayed commit."""
    scheduled = responder.event_bus.io_loop.call_later.call_count
    assert scheduled == 1


def test_commit_empty_queue(responder):
    """With nothing queued, attempt_commit neither commits nor spends tokens."""
    responder.counters = [0, 0, 0, 0]
    with patch.object(responder, 'event_bus', MagicMock()), \
            patch.object(responder, 'connection') as fake_connection:
        fake_connection.closed = False
        responder.attempt_commit()
        assert fake_connection.commit.call_count == 0
    assert responder.tokens == DEFAULT_COMMIT_BURST


def test_postgres_notification(responder):
    """_insert emits a NOTIFY carrying the per-queue counters, then resets them."""
    with patch.object(responder, 'cursor'):
        responder._insert('salt/minion/1/start', {'value': 1, 'id': 'testminion'})
        assert responder.counters == [0, 0, 0, 0]
        assert responder.tokens == DEFAULT_COMMIT_BURST - 1
        last_call = responder.cursor.execute.mock_calls[-1:]
        assert last_call == [call("NOTIFY suseSaltEvent, '0,0,1,0';")]

def test_add_token(responder):
    """add_token grows an empty bucket by exactly one."""
    responder.tokens = 0
    responder.add_token()
    assert responder.tokens == 1

def test_add_token_max(responder):
    """add_token never grows the bucket past DEFAULT_COMMIT_BURST."""
    responder.add_token()
    assert responder.tokens == DEFAULT_COMMIT_BURST

def test_commit_avoidance_without_tokens(responder):
    """With an empty token bucket, _insert queues the row but never commits."""
    with patch.object(responder, 'cursor'), \
            patch.object(responder, 'connection') as fake_connection:
        fake_connection.closed = False
        fake_connection.encoding = 'utf-8'
        responder.tokens = 0
        responder._insert('salt/minion/1/start', {'id': 'testminion', 'value': 1})
        # The pending event is counted, but no token was spent and no
        # commit was issued.
        assert responder.counters == [0, 0, 1, 0]
        assert responder.tokens == 0
        assert responder.connection.commit.call_count == 0
        expected = call('INSERT INTO suseSaltEvent (minion_id, data, queue) VALUES (%s, %s, %s);', ('testminion', '{"tag": "salt/minion/1/start", "data": {"id": "testminion", "value": 1}}', 2))
        assert responder.cursor.execute.mock_calls == [expected]


def test_postgres_connect(db_connection, responder):
    """db_keepalive retries after an OperationalError, sleeping in between."""
    dead = new_connection()
    dead.close()
    responder.connection = dead
    with patch('mgr_events.time') as time_mock:
        with patch('mgr_events.psycopg2') as psycopg2_mock:
            # First connect attempt blows up, the second one succeeds.
            psycopg2_mock.connect.side_effect = [psycopg2.OperationalError, db_connection]
            psycopg2_mock.OperationalError = psycopg2.OperationalError
            responder.db_keepalive()
            assert psycopg2_mock.connect.call_count == 2
    time_mock.sleep.assert_called_once_with(5)


def test_postgres_connect_with_port(responder):
    """A configured port must appear in the DSN handed to psycopg2.connect."""
    responder.config['postgres_db']['port'] = '1234'
    with patch('mgr_events.psycopg2') as psycopg2_mock:
        responder._connect_to_database()
        expected_dsn = u"dbname='tests' user='postgres' host='localhost' port='1234' password=''"
        psycopg2_mock.connect.assert_called_once_with(expected_dsn)
07070100000127000081B400000000000000000000000163F87E300000039B000000000000000000000000000000000000003000000000susemanager-sls/test/test_pillar_suma_minion.py   # -*- coding: utf-8 -*-
'''
:codeauthor:    Michael Calmer <Michael.Calmer@suse.com>
'''

from mock import MagicMock, patch

import sys
sys.path.append("../modules/pillar")
import os
import copy

import suma_minion


def test_virtual():
    '''
    Test that the suma_minion pillar module reports itself as usable.

    The assertion pins the literal ``True`` (the old docstring claimed the
    module name is returned, but the code has always compared against True).
    '''
    # ``is True`` is the idiomatic strict check; ``== True`` would also
    # accept truthy values like 1.
    assert suma_minion.__virtual__() is True

def test_formula_pillars():
    '''
    Formula pillars must come back in the configured order: branch-network
    (from the order file) first, then the group formulas locale and tftpd.
    '''
    # All fixture paths live under ./data relative to the test directory.
    data_dir = os.path.sep.join([os.path.abspath(''), 'data'])
    suma_minion.FORMULAS_DATA_PATH = data_dir
    suma_minion.FORMULA_ORDER_FILE = os.path.sep.join([data_dir, 'formula_order.json'])
    suma_minion.MANAGER_FORMULAS_METADATA_MANAGER_PATH = os.path.sep.join([data_dir, 'formulas', 'metadata'])
    pillar = suma_minion.formula_pillars("suma-refhead-min-sles12sp4.mgr.suse.de", [9])
    assert "formulas" in pillar
    assert pillar["formulas"] == ['branch-network', 'locale', 'tftpd']

 07070100000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000B00000000TRAILER!!!                                                                                                                                                                                                                                                                                                                                                                                                        