07070100000000000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001000000000susemanager-sls   07070100000001000081B400000000000000000000000160C1E96E00000023000000000000000000000000000000000000001B00000000susemanager-sls/.gitignore    *.cache*
*__pycache__*
*.pyc
*.pyo
 07070100000002000081B400000000000000000000000160C1E96E000004A8000000000000000000000000000000000000002000000000susemanager-sls/Makefile.python   THIS_MAKEFILE := $(realpath $(lastword $(MAKEFILE_LIST)))
CURRENT_DIR := $(dir $(THIS_MAKEFILE))
include $(CURRENT_DIR)../../rel-eng/Makefile.python

# Docker tests variables
DOCKER_CONTAINER_BASE := uyuni-master
DOCKER_REGISTRY       := registry.mgr.suse.de
# $$PYTHONPATH so the reference survives make's recipe expansion and is
# expanded by the shell ($PYTHONPATH would be read as make's $(P) + "YTHONPATH").
DOCKER_RUN_EXPORT     := "PYTHONPATH=$$PYTHONPATH"
DOCKER_VOLUMES        := -v "$(CURDIR)/../../:/manager"

# Run pylint in the local pip environment; the report always lands in
# reports/pylint.log (`|| true` keeps the target green so the log is produced).
__pylint ::
	$(call update_pip_env)
	pylint --rcfile=pylintrc $(shell find -name '*.py') > reports/pylint.log || true

# Run the pytest suite in the local pip environment.
__pytest ::
	$(call update_pip_env)
	$(call install_pytest)
	cd src/tests; pytest --disable-warnings --tb=native --color=yes -v

# Run __pylint inside the CI docker container.
docker_pylint ::
	docker run --rm -e $(DOCKER_RUN_EXPORT) $(DOCKER_VOLUMES) $(DOCKER_REGISTRY)/$(DOCKER_CONTAINER_BASE)-pgsql /bin/sh -c "cd /manager/susemanager-utils/susemanager-sls/; make -f Makefile.python __pylint"

# Open an interactive shell inside the CI docker container.
docker_shell ::
	docker run -t -i --rm -e $(DOCKER_RUN_EXPORT) $(DOCKER_VOLUMES) $(DOCKER_REGISTRY)/$(DOCKER_CONTAINER_BASE)-pgsql /bin/bash

# Run __pytest inside the CI docker container.
docker_pytest ::
	docker run --rm -e $(DOCKER_RUN_EXPORT) $(DOCKER_VOLUMES) $(DOCKER_REGISTRY)/$(DOCKER_CONTAINER_BASE)-pgsql /bin/sh -c "cd /manager/susemanager-utils/susemanager-sls; make -f Makefile.python __pytest"
07070100000003000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000002100000000susemanager-sls/formula_metadata  07070100000004000081B400000000000000000000000160C1E96E000001A1000000000000000000000000000000000000002B00000000susemanager-sls/formula_metadata/README.md    All metadata for your custom Salt Formulas should be put here. (/srv/formula_metadata/<your-formula-name>/)
The state files need to be on a salt file root and belong to /srv/salt.

To learn more about Salt Formulas and how to write them visit: https://docs.saltstack.com/en/latest/topics/development/conventions/formulas.html
To use your formulas effectively with SUSE Manager they additionally need a form.yml file.
   07070100000005000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001900000000susemanager-sls/formulas  07070100000006000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000002200000000susemanager-sls/formulas/metadata 07070100000007000081B400000000000000000000000160C1E96E000001A4000000000000000000000000000000000000002C00000000susemanager-sls/formulas/metadata/README.md   The metadata of Salt Formulas that get installed per RPM belongs in this directory.

For more information visit:
https://github.com/SUSE/spacewalk/wiki/Using-Salt-formulas-with-SUSE-Manager
https://github.com/SUSE/spacewalk/wiki/Writing-Salt-Formulas-for-SUSE-Manager
https://github.com/SUSE/spacewalk/wiki/Salt-Formula-RPMs-for-SUSE-Manager
https://github.com/SUSE/spacewalk/wiki/How-Salt-formulas-in-SUSE-Manager-work
07070100000008000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000002000000000susemanager-sls/formulas/states   07070100000009000081B400000000000000000000000160C1E96E00000022000000000000000000000000000000000000002D00000000susemanager-sls/formulas/states/formulas.sls  include: {{ pillar["formulas"] }}
  0707010000000A000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001800000000susemanager-sls/modules   0707010000000B000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000002000000000susemanager-sls/modules/engines   0707010000000C000081B400000000000000000000000160C1E96E00001E35000000000000000000000000000000000000002E00000000susemanager-sls/modules/engines/mgr_events.py # -*- coding: utf-8 -*-
'''
mgr_events.py is a SaltStack engine that writes selected events to SUSE
Manager's PostgreSQL database. Additionally, it sends notifications via the
LISTEN/NOTIFY mechanism to alert SUSE Manager of newly available events.

mgr_events.py tries to keep the I/O low in high load scenarios. Therefore
events are INSERTed once they come in, but not necessarily COMMITted
immediately.

The algorithm is an implementation of token bucket:
 - a COMMIT costs one token
 - initially, commit_burst tokens are available
 - every commit_interval seconds, one new token is generated
   (up to commit_burst)
 - when an event arrives and there are tokens available it is COMMITted
   immediately
 - when an event arrives but no tokens are available, the event is INSERTed but
   not COMMITted yet. COMMIT will happen as soon as a token is available

.. versionadded:: 2018.3.0

:depends: psycopg2

Minimal configuration example

.. code:: yaml

    engines:
      - mgr_events:
          postgres_db:
              dbname: susemanager
              user: spacewalk
              password: spacewalk
              host: localhost
              notify_channel: suseSaltEvent

Full configuration example

.. code:: yaml

    engines:
      - mgr_events:
          commit_interval: 1
          commit_burst: 100
          postgres_db:
              dbname: susemanager
              user: spacewalk
              password: spacewalk
              host: localhost
              port: 5432
              notify_channel: suseSaltEvent

Most of the values have a sane default, but the PostgreSQL login credentials
must always be provided. Within `postgres_db`, `host` and `notify_channel` are
optional: `host` defaults to 'localhost' and `notify_channel` to 'suseSaltEvent'.
'''

# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
import fnmatch
import hashlib

try:
    import psycopg2
    HAS_PSYCOPG2 = True
except ImportError:
    HAS_PSYCOPG2 = False

# Import salt libs
import salt.ext.tornado
import salt.utils.event
import json

log = logging.getLogger(__name__)

# Token-bucket defaults: one new COMMIT token per second, up to a burst of 100
# (see the algorithm description in the module docstring).
DEFAULT_COMMIT_INTERVAL = 1
DEFAULT_COMMIT_BURST = 100

def __virtual__():
    # Only load this engine when the psycopg2 driver imported successfully.
    return HAS_PSYCOPG2


class Responder:
    '''
    Consumes events from the Salt master event bus, INSERTs selected events
    into the suseSaltEvent PostgreSQL table, and COMMITs/NOTIFYs with
    token-bucket rate limiting (see the module docstring for the algorithm).
    '''

    def __init__(self, event_bus, config):
        '''
        :param event_bus: Salt master event bus (provides io_loop and unpack)
        :param config: engine configuration; missing keys get defaults here
        '''
        self.config = config
        self.config.setdefault('commit_interval', DEFAULT_COMMIT_INTERVAL)
        self.config.setdefault('commit_burst', DEFAULT_COMMIT_BURST)
        self.config.setdefault('postgres_db', {})
        self.config['postgres_db'].setdefault('host', 'localhost')
        self.config['postgres_db'].setdefault('notify_channel', 'suseSaltEvent')
        # One pending-event counter per queue: index 0 for events without a
        # minion id, indexes 1..thread_pool_size for hashed minion queues.
        self.counters = [0 for i in range(config['events']['thread_pool_size'] + 1)]
        # The bucket starts full: the first commit_burst events commit immediately.
        self.tokens = config['commit_burst']
        self.event_bus = event_bus
        self._connect_to_database()
        # Schedule the periodic token refill on the event bus' IO loop.
        self.event_bus.io_loop.call_later(config['commit_interval'], self.add_token)

    def _connect_to_database(self):
        '''
        Open the PostgreSQL connection and create a cursor. Blocks the
        caller, retrying every 5 seconds until the connection succeeds.
        '''
        db_config = self.config.get('postgres_db')
        # 'port' is optional; omit it to use the driver's default.
        if 'port' in db_config:
            conn_string = "dbname='{dbname}' user='{user}' host='{host}' port='{port}' password='{password}'".format(**db_config)
        else:
            conn_string = "dbname='{dbname}' user='{user}' host='{host}' password='{password}'".format(**db_config)
        log.debug("%s: connecting to database", __name__)
        while True:
            try:
                self.connection = psycopg2.connect(conn_string)
                break
            except psycopg2.OperationalError as err:
                log.error("%s: %s", __name__, err)
                log.error("%s: Retrying in 5 seconds.", __name__)
                time.sleep(5)
        self.cursor = self.connection.cursor()

    def _insert(self, tag, data):
        '''
        INSERT the event into suseSaltEvent when its tag matches one of the
        relevant patterns; salt-mine updates and batch-mode presence pings
        are discarded. A COMMIT is attempted afterwards (token permitting).
        '''
        self.db_keepalive()
        if any([
            fnmatch.fnmatch(tag, "salt/minion/*/start"),
            fnmatch.fnmatch(tag, "salt/job/*/ret/*"),
            fnmatch.fnmatch(tag, "salt/beacon/*"),
            fnmatch.fnmatch(tag, "salt/engines/*"),
            fnmatch.fnmatch(tag, "salt/batch/*/start"),
            fnmatch.fnmatch(tag, "suse/manager/image_deployed"),
            fnmatch.fnmatch(tag, "suse/manager/image_synced"),
            fnmatch.fnmatch(tag, "suse/systemid/generate")
        ]) and not self._is_salt_mine_event(tag, data) and not self._is_presence_ping(tag, data):
            # Queue 0 is for events without a minion id; otherwise the queue
            # is picked by hashing the minion id, so a given minion's events
            # always land in the same queue.
            queue = 0
            if 'id' in data:
                hash_sum = hashlib.md5(data.get("id").encode(self.connection.encoding)).hexdigest()[0:8]
                queue = int(hash_sum, 16) % self.config['events']['thread_pool_size'] + 1
            log.debug("%s: Adding event to queue %d -> %s", __name__, queue, tag)
            try:
                self.cursor.execute(
                    'INSERT INTO suseSaltEvent (minion_id, data, queue) VALUES (%s, %s, %s);',
                    (data.get("id"), json.dumps({'tag': tag, 'data': data}), queue)
                )
                self.counters[queue] += 1
                self.attempt_commit()
            except Exception as err:
                log.error("%s: %s", __name__, err)
            finally:
                log.debug("%s: %s", __name__, self.cursor.query)
        else:
            log.debug("%s: Discarding event -> %s", __name__, tag)

    def trace_log(self):
        # Trace-level snapshot of the per-queue counters and the token bucket.
        log.trace("%s: queues sizes -> %s", __name__, self.counters)
        log.trace("%s: tokens -> %s", __name__, self.tokens)

    def _is_salt_mine_event(self, tag, data):
        # A job return whose function is mine.update (filtered out).
        return fnmatch.fnmatch(tag, "salt/job/*/ret/*") and self._is_salt_mine_update(data)

    def _is_salt_mine_update(self, data):
        return data.get("fun") == "mine.update"

    def _is_presence_ping(self, tag, data):
        # Batch-mode test.ping job returns are presence pings (filtered out).
        return fnmatch.fnmatch(tag, "salt/job/*/ret/*") and self._is_test_ping(data) and self._is_batch_mode(data)

    def _is_test_ping(self, data):
        return data.get("fun") == "test.ping"

    def _is_batch_mode(self, data):
        return data.get("metadata", {}).get("batch-mode")

    @salt.ext.tornado.gen.coroutine
    def add_event_to_queue(self, raw):
        '''
        Event-bus handler: unpack a raw bus message and insert the event.
        '''
        tag, data = self.event_bus.unpack(raw, self.event_bus.serial)
        self._insert(tag, data)

    def db_keepalive(self):
        # Reconnect (blocking, with retries) if the connection was dropped.
        if self.connection.closed:
            log.error("%s: Diconnected from database. Trying to reconnect...", __name__)
            self._connect_to_database()

    @salt.ext.tornado.gen.coroutine
    def add_token(self):
        '''
        Periodic callback: add one token (capped at commit_burst), try to
        commit pending events, and reschedule itself.
        '''
        self.tokens = min(self.tokens + 1, self.config['commit_burst'])
        self.attempt_commit()
        self.trace_log()
        self.event_bus.io_loop.call_later(self.config['commit_interval'], self.add_token)

    def attempt_commit(self):
        """
        Committing to the database.

        COMMITs pending INSERTs and NOTIFYs the configured channel with the
        per-queue counters — but only when a token is available and at least
        one event is uncommitted. Consumes one token.
        """
        self.db_keepalive()
        if self.tokens > 0 and sum(self.counters) > 0:
            log.debug("%s: commit", __name__)
            # NOTE(review): channel name and payload are interpolated into the
            # SQL string; both come from local engine config, not user input.
            self.cursor.execute(
                "NOTIFY {}, '{}';".format(
                    self.config['postgres_db']['notify_channel'],
                    ",".join([str(counter) for counter in self.counters]))
            )
            self.connection.commit()
            self.counters = [0 for i in range(0, self.config['events']['thread_pool_size'] + 1)]
            self.tokens -=1

def start(**config):
    '''
    Engine entry point: listen to events on the master event bus and write
    them to the Postgres database via a Responder.
    '''
    loop = salt.ext.tornado.ioloop.IOLoop(make_current=False)
    loop.make_current()
    bus = salt.utils.event.get_master_event(
        __opts__,
        __opts__['sock_dir'],
        listen=True,
        io_loop=loop)
    handler = Responder(bus, config)
    bus.set_event_handler(handler.add_event_to_queue)
    loop.start()
   0707010000000D000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001F00000000susemanager-sls/modules/pillar    0707010000000E000081B400000000000000000000000160C1E96E00000147000000000000000000000000000000000000002900000000susemanager-sls/modules/pillar/README.md  Overview
========

1. In the "/etc/salt/master" add the following:

   extension_modules: /path/to/the/extension_pillar_modules

2. Copy *.py from this directory to the `extension_modules` directory.

3. Then, in the "/etc/salt/master" add the following:

   ext_pillar:
     - suma_minion: /another/path/with/the/pillar/files
 0707010000000F000081B400000000000000000000000160C1E96E00003E39000000000000000000000000000000000000002E00000000susemanager-sls/modules/pillar/suma_minion.py # -*- coding: utf-8 -*-
'''
Retrieve SUSE Manager pillar data for a minion_id.
- Adds generated and static SUSE Manager pillar data.
- Adds formula pillar data.

.. code-block:: yaml

    ext_pillar:
      - suma_minion: True

'''

# Import python libs
from __future__ import absolute_import
from enum import Enum
import os
import logging
import yaml
import json
import sys
import re
import salt.utils.dictupdate
import salt.utils.stringutils

# SUSE Manager static pillar paths:
MANAGER_STATIC_PILLAR_DATA_PATH = '/usr/share/susemanager/pillar_data'
MANAGER_PILLAR_DATA_PATH = '/srv/susemanager/pillar_data'

# SUSE Manager formulas paths (load_formula_pillar/load_formula_metadata
# probe standalone, then manager, then custom locations):
MANAGER_FORMULAS_METADATA_MANAGER_PATH = '/usr/share/susemanager/formulas/metadata'
MANAGER_FORMULAS_METADATA_STANDALONE_PATH = '/usr/share/salt-formulas/metadata'
CUSTOM_FORMULAS_METADATA_PATH = '/srv/formula_metadata'
FORMULAS_DATA_PATH = '/srv/susemanager/formula_data'
FORMULA_ORDER_FILE = FORMULAS_DATA_PATH + '/formula_order.json'

# OS images path:
IMAGES_DATA_PATH = os.path.join(MANAGER_PILLAR_DATA_PATH, 'images')

# SUSE Manager static pillar data.
MANAGER_STATIC_PILLAR = [
    'gpgkeys'
]

# Global pillar files merged for every minion.
MANAGER_GLOBAL_PILLAR = [
    'mgr_conf'
]

# Per-minion generated pillar files: pillar_<minion_id> + one of the suffixes.
MINION_PILLAR_FILES_PREFIX = "pillar_{minion_id}"
MINION_PILLAR_FILES_SUFFIXES = [".yml", "_group_memberships.yml", "_virtualization.yml"]

CONFIG_FILE = '/etc/rhn/rhn.conf'

# Cache of parsed metadata.yml per formula name (filled by load_formula_metadata).
formulas_metadata_cache = dict()

# Formula group subtypes
class EditGroupSubtype(Enum):
    # Shape of an "edit-group" form element, derived from its $prototype
    # (see get_edit_group_subtype).
    PRIMITIVE_LIST = "PRIMITIVE_LIST"
    PRIMITIVE_DICTIONARY = "PRIMITIVE_DICTIONARY"
    LIST_OF_DICTIONARIES = "LIST_OF_DICTIONARIES"
    DICTIONARY_OF_DICTIONARIES = "DICTIONARY_OF_DICTIONARIES"

# Set up logging
log = logging.getLogger(__name__)


def __virtual__():
    '''
    Ensure the pillar module name.
    '''
    return True

def ext_pillar(minion_id, *args):
    '''
    Find SUMA-related pillars for the registered minions and return the data.

    Sources are applied in order, later ones winning on key conflicts:
    static pillar, global pillar, per-minion generated files (recursively
    merged), formula pillars, image pillars. Errors in any single source
    are logged and skipped so the remaining pillar data is still returned.
    '''

    log.debug('Getting pillar data for the minion "{0}"'.format(minion_id))
    ret = {}

    # Including SUSE Manager static pillar data
    for static_pillar in MANAGER_STATIC_PILLAR:
        static_pillar_filename = os.path.join(MANAGER_STATIC_PILLAR_DATA_PATH, static_pillar)
        try:
            ret.update(yaml.load(open('{0}.yml'.format(static_pillar_filename)).read(), Loader=yaml.FullLoader))
        except Exception as exc:
            log.error('Error accessing "{0}": {1}'.format(static_pillar_filename, exc))

    # Including SUSE Manager global pillar data
    for global_pillar in MANAGER_GLOBAL_PILLAR:
        global_pillar_filename = os.path.join(MANAGER_PILLAR_DATA_PATH, global_pillar)
        try:
            ret.update(yaml.load(open('{0}.yml'.format(global_pillar_filename)).read(), Loader=yaml.FullLoader))
        except Exception as exc:
            log.error('Error accessing "{0}": {1}'.format(global_pillar_filename, exc))

    # Including generated pillar data for this minion
    minion_pillar_filename_prefix = MINION_PILLAR_FILES_PREFIX.format(minion_id=minion_id)
    for suffix in MINION_PILLAR_FILES_SUFFIXES:
        data_filename = os.path.join(MANAGER_PILLAR_DATA_PATH, minion_pillar_filename_prefix + suffix)
        if os.path.exists(data_filename):
            try:
                # Recursive merge so nested keys from the different
                # per-minion files are combined rather than replaced.
                ret = salt.utils.dictupdate.merge(
                        ret,
                        yaml.load(open(data_filename).read(), Loader=yaml.FullLoader),
                        strategy='recurse')
            except Exception as error:
                log.error('Error accessing "{pillar_file}": {message}'.format(pillar_file=data_filename, message=str(error)))

    # Including formulas into pillar data
    try:
        ret.update(formula_pillars(minion_id, ret.get("group_ids", [])))
    except Exception as error:
        log.error('Error accessing formula pillar data: {message}'.format(message=str(error)))

    # Including images pillar
    try:
        ret.update(image_pillars(minion_id, ret.get("group_ids", []), ret.get("org_id", 1)))
    except Exception as error:
        log.error('Error accessing image pillar data: {}'.format(str(error)))

    return ret


def load_formulas_from_file(formula_filename):
    '''
    Read a formula assignment JSON file from FORMULAS_DATA_PATH.
    Returns {} when the file is missing or cannot be parsed.
    '''
    assignments_path = os.path.join(FORMULAS_DATA_PATH, formula_filename)
    if not os.path.exists(assignments_path):
        return {}
    try:
        with open(assignments_path) as source:
            return json.load(source)
    except Exception as error:
        log.error('Error loading formulas from file: {message}'.format(message=str(error)))
        return {}


def formula_pillars(minion_id, group_ids):
    '''
    Find formula pillars for the minion, merge them and return the data.

    Group-assigned formulas are processed first, then minion-assigned ones;
    each formula is loaded once, except cluster-formulas, which are merged
    once per cluster group. The resulting "formulas" key lists the applied
    formulas, reordered by FORMULA_ORDER_FILE when that file exists.
    '''
    pillar = {}
    out_formulas = []

    # Loading group formulas
    data = load_formulas_from_file("group_formulas.json")
    for group in group_ids:
        for formula in data.get(str(group), []):
            formula_utf8 = salt.utils.stringutils.to_str(formula)
            formula_metadata = load_formula_metadata(formula)
            if formula_metadata.get("type", "") != "cluster-formula":
                # a minion can be in multiple cluster groups, each group with its own cluster-formulas
                # in such a case we want to merge all values from cluster-formulas
                # the values of the formula will be under different keys, mgr_clusters:cluster1:.., mgr_clusters:cluster2:...
                if formula_utf8 in out_formulas:
                    continue # already processed
            out_formulas.append(formula_utf8)
            pillar = salt.utils.dictupdate.merge(pillar,
                     load_formula_pillar(minion_id, group, formula, formula_metadata),
                     strategy='recurse')

    # Loading minion formulas
    data = load_formulas_from_file("minion_formulas.json")
    for formula in data.get(str(minion_id), []):
        formula_utf8 = salt.utils.stringutils.to_str(formula)
        if formula_utf8 in out_formulas:
            continue # already processed
        out_formulas.append(formula_utf8)
        pillar = salt.utils.dictupdate.merge(pillar,
                 load_formula_pillar(minion_id, None, formula),
                 strategy='recurse')

    # Loading the formula order
    if os.path.exists(FORMULA_ORDER_FILE):
        with open(FORMULA_ORDER_FILE) as ofile:
            order = json.load(ofile)
            # Keep only applied formulas, in the globally configured order.
            pillar["formulas"] = list(filter(lambda i: i in out_formulas, order))
    else:
        pillar["formulas"] = out_formulas

    return pillar


def load_formula_pillar(minion_id, group_id, formula_name, formula_metadata = None):
    '''
    Load the data from a specific formula for a minion in a specific group, merge and return it.

    The form.yml layout is searched in the standalone, manager and custom
    metadata paths (in that order). Group- and system-level JSON data are
    merged according to the layout; for cluster-formulas the group data is
    unwrapped from its mgr_clusters:<cluster>:<key> namespace before the
    merge and re-wrapped afterwards. Returns {} on any load error.

    :param group_id: group the formula was assigned through, or None for
                     minion-assigned formulas (skips group data entirely)
    '''
    layout_filename = os.path.join( MANAGER_FORMULAS_METADATA_STANDALONE_PATH, formula_name, "form.yml")
    if not os.path.isfile(layout_filename):
        layout_filename = os.path.join(MANAGER_FORMULAS_METADATA_MANAGER_PATH, formula_name, "form.yml")
        if not os.path.isfile(layout_filename):
            layout_filename = os.path.join(CUSTOM_FORMULAS_METADATA_PATH, formula_name, "form.yml")
            if not os.path.isfile(layout_filename):
                log.error('Error loading data for formula "{formula}": No form.yml found'.format(formula=formula_name))
                return {}

    group_filename = os.path.join(FORMULAS_DATA_PATH, "group_pillar", "{id}_{name}.json".format(id=group_id, name=formula_name)) if group_id is not None else None
    system_filename = os.path.join(FORMULAS_DATA_PATH, "pillar", "{id}_{name}.json".format(id=minion_id, name=formula_name))

    try:
        layout = yaml.load(open(layout_filename).read(), Loader=yaml.FullLoader)
        group_data = json.load(open(group_filename)) if group_filename is not None and os.path.isfile(group_filename) else {}
        system_data = json.load(open(system_filename)) if os.path.isfile(system_filename) else {}
    except Exception as error:
        log.error('Error loading data for formula "{formula}": {message}'.format(formula=formula_name, message=str(error)))
        return {}

    # if group_data starts with mgr_clusters then merge and adjust without the mgr_clusters:<cluster>:settings prefix
    cluster_name = None
    cluster_pillar_key = None
    if formula_metadata and formula_metadata.get("type", "") == "cluster-formula":
        if "cluster_pillar_key" not in formula_metadata:
            log.error("No 'cluster_pillar_key' in metadata of formula {}".format(formula_name))
        else:
            cluster_pillar_key = formula_metadata["cluster_pillar_key"]
            # "*" matches the first (only) cluster name under mgr_clusters.
            group_data, cluster_name = _pillar_value_by_path(group_data, "mgr_clusters:*:{}".format(cluster_pillar_key))

    merged_data = merge_formula_data(layout, group_data, system_data)
    merged_data = adjust_empty_values(layout, merged_data)

    # put back data under cluster pillar namespace
    if cluster_name:
        merged_data = {"mgr_clusters": {cluster_name: {cluster_pillar_key: merged_data}}}

    return merged_data


def merge_formula_data(layout, group_data, system_data, scope="system"):
    '''
    Merge group- and system-level formula data according to the form
    layout, honouring each element's $scope ("system", "group" or
    "readonly"; the enclosing scope is inherited when unset). Container
    elements (group/hidden-group/namespace) are merged recursively.
    '''
    merged = {}

    for name, element in layout.items():
        # $-prefixed keys are layout directives, not data elements; any
        # non-dict entry is layout noise and is skipped as well.
        if name.startswith("$") or not isinstance(element, dict):
            continue

        effective_scope = element.get("$scope", scope)
        fallback = element.get("$default", element.get("$placeholder", ""))

        if element.get("$type", "text") in ("group", "hidden-group", "namespace"):
            merged[name] = merge_formula_data(
                element,
                group_data.get(name, {}),
                system_data.get(name, {}),
                effective_scope)
        # edit-group is handled as a primitive element below - either
        # system data or group data is taken, no merging
        elif effective_scope == "system":
            merged[name] = system_data.get(name, group_data.get(name, fallback))
        elif effective_scope == "group":
            merged[name] = group_data.get(name, fallback)
        elif effective_scope == "readonly":
            merged[name] = fallback
        else:
            # Unknown scope: keep the key, but with no value.
            merged[name] = None

    return merged


def adjust_empty_values(layout, data):
    '''
    Normalize empty values in formula data based on the form layout:
    recurse into containers, rebuild edit-group collections entry by
    entry, apply $ifEmpty fallbacks, and drop empty $optional elements.
    '''
    adjusted = {}

    for name, element in layout.items():
        # Skip layout directives ($-keys) and non-dict layout entries.
        if name.startswith("$") or not isinstance(element, dict):
            continue

        kind = element.get("$type", "text")
        value = data.get(name, "")

        if kind in ("group", "hidden-group", "namespace"):
            value = adjust_empty_values(element, data.get(name, {}))
        elif kind == "edit-group":
            prototype = element.get("$prototype")
            subtype = get_edit_group_subtype(element)
            raw = data.get(name)
            if subtype is EditGroupSubtype.DICTIONARY_OF_DICTIONARIES:
                value = {}
                if isinstance(raw, dict):
                    for key, entry in list(raw.items()):
                        value[key] = adjust_empty_values(prototype, entry)
            elif subtype is EditGroupSubtype.LIST_OF_DICTIONARIES:
                value = []
                if isinstance(raw, list):
                    for entry in raw:
                        value.append(adjust_empty_values(prototype, entry))

        # Substitute the configured fallback for empty values.
        if not value and '$ifEmpty' in element:
            value = element.get("$ifEmpty")

        # Empty optional elements are omitted entirely.
        if value or not element.get("$optional"):
            adjusted[name] = value

    return adjusted

def get_edit_group_subtype(element):
    '''
    Classify an edit-group element by its $prototype: a $key makes it a
    dictionary (otherwise a list), and a "group" $type makes the entries
    dictionaries (otherwise primitives). Returns None without a prototype.
    '''
    prototype = element.get("$prototype") if element is not None else None
    if not prototype:
        return None

    keyed = prototype.get("$key") is not None
    grouped = prototype.get("$type", "group") == "group"

    if not keyed and not grouped:
        return EditGroupSubtype.PRIMITIVE_LIST
    if keyed and not grouped:
        return EditGroupSubtype.PRIMITIVE_DICTIONARY
    if not keyed:
        return EditGroupSubtype.LIST_OF_DICTIONARIES
    return EditGroupSubtype.DICTIONARY_OF_DICTIONARIES

def image_pillars(minion_id, group_ids, org_id):
    '''
    Load image pillars

    Image pillars are automatically created after image build and are
    available to all minions. *.sls files are read from the top-level
    images directory (backward compatibility) and from "orgNN"/"groupNN"
    subdirectories matching this minion's org and groups; org pillars are
    merged before group pillars, so later (group) values win on conflict.
    '''
    ret = {}
    group_dirs = []
    org_dirs = []

    for pillar in os.listdir(IMAGES_DATA_PATH):
        pillar_path = os.path.join(IMAGES_DATA_PATH, pillar)

        # read also pillars from top dir, for backward compatibility
        if os.path.isfile(pillar_path) and pillar.endswith('.sls'):
            try:
                with open(pillar_path) as p:
                    ret = salt.utils.dictupdate.merge(ret, yaml.load(p.read(), Loader=yaml.FullLoader), strategy='recurse')
            except Exception as error:
                # Fixed: was `pillar.path()`, which raised AttributeError
                # (pillar is a str) and masked the original error.
                log.error('Error loading data for image "{image}": {message}'.format(image=pillar_path, message=str(error)))

        elif os.path.isdir(pillar_path):
            if pillar.startswith('org') and int(pillar[3:]) == org_id:
                org_dirs.append(pillar_path)
            elif pillar.startswith('group') and int(pillar[5:]) in group_ids:
                group_dirs.append(pillar_path)

    for pillar_dir in org_dirs + group_dirs:
        for pillar in os.listdir(pillar_dir):
            pillar_path = os.path.join(pillar_dir, pillar)
            if os.path.isfile(pillar_path) and pillar.endswith('.sls'):
                try:
                    with open(pillar_path) as p:
                        ret = salt.utils.dictupdate.merge(ret, yaml.load(p.read(), Loader=yaml.FullLoader), strategy='recurse')
                except Exception as error:
                    # Fixed: same `pillar.path()` AttributeError as above.
                    log.error('Error loading data for image "{image}": {message}'.format(image=pillar_path, message=str(error)))

    return ret

def load_formula_metadata(formula_name):
    '''
    Load (and cache) the metadata.yml for the given formula.

    The file is searched in the standalone, manager and custom metadata
    paths (in that order, same as form.yml). Returns {} when no metadata
    file is found or it cannot be parsed; successful loads are cached in
    formulas_metadata_cache.
    '''
    if formula_name in formulas_metadata_cache:
        return formulas_metadata_cache[formula_name]

    metadata_filename = None
    metadata_paths_ordered = [
        os.path.join(MANAGER_FORMULAS_METADATA_STANDALONE_PATH, formula_name, "metadata.yml"),
        os.path.join(MANAGER_FORMULAS_METADATA_MANAGER_PATH, formula_name, "metadata.yml"),
        os.path.join(CUSTOM_FORMULAS_METADATA_PATH, formula_name, "metadata.yml")
    ]

    # Take the first metadata file that exists
    for mpath in metadata_paths_ordered:
        if os.path.isfile(mpath):
            metadata_filename = mpath
            break

    if not metadata_filename:
        log.error('Error loading metadata for formula "{formula}": No metadata.yml found'.format(formula=formula_name))
        return {}
    try:
        # FullLoader for consistency with the other yaml.load calls in this
        # module (a bare yaml.load is deprecated); the context manager
        # closes the file deterministically.
        with open(metadata_filename) as metadata_file:
            metadata = yaml.load(metadata_file.read(), Loader=yaml.FullLoader)
    except Exception as error:
        log.error('Error loading data for formula "{formula}": {message}'.format(formula=formula_name, message=str(error)))
        return {}

    formulas_metadata_cache[formula_name] = metadata
    return metadata

def _pillar_value_by_path(data, path):
    result = data
    first_key = None
    for token in path.split(":"):
        if token == "*":
            first_key = next(iter(result))
            result = result[first_key] if first_key else None
        elif token in result:
            result = result[token]
        else:
            break
    return result, first_key
   07070100000010000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000002000000000susemanager-sls/modules/runners   07070100000011000081B400000000000000000000000160C1E96E000005ED000000000000000000000000000000000000003600000000susemanager-sls/modules/runners/kiwi-image-collect.py # SUSE Manager
# Copyright (c) 2018--2020 SUSE LLC

# runner to collect image from build host

import os
import logging

log = logging.getLogger(__name__)

def upload_file_from_minion(minion, minion_ip, filetoupload, targetdir):
    '''
    Fetch an image file from a build-host minion into targetdir using
    rsync over ssh. The minion's FQDN grain is used as the remote host;
    when it is missing or 'localhost', the given IP address is used.
    '''
    grains = __salt__['cache.grains'](tgt=minion)
    fqdn = grains.get(minion, {}).get('fqdn')
    log.info('Collecting image "{}" from minion {} (FQDN: {}, IP: {})'.format(filetoupload, minion, fqdn, minion_ip))
    if not fqdn or fqdn == 'localhost':
        fqdn = minion_ip
    remote_src = 'root@{}:{}'.format(fqdn, filetoupload)
    return __salt__['salt.cmd'](
      'rsync.rsync',
      remote_src,
      targetdir,
      rsh='ssh -o IdentityFile=/srv/susemanager/salt/salt_ssh/mgr_ssh_id -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
    )

def move_file_from_minion_cache(minion, filetomove, targetdir):
    '''
    Move a file the minion already pushed into the master's cache
    (<cachedir>/minions/<minion>/files/...) into targetdir.
    file.move throws an exception in case of error.
    '''
    cached_file = os.path.join(
        __opts__['cachedir'], 'minions', minion, 'files', filetomove.lstrip('/'))
    log.info('Collecting image from minion cache "{}"'.format(cached_file))
    return __salt__['salt.cmd']('file.move', cached_file, targetdir)

def kiwi_collect_image(minion, minion_ip, filepath, image_store_dir):
    '''
    Collect a built Kiwi image into image_store_dir: from the minion file
    cache when the minion pillar enables use_salt_transport, otherwise
    via rsync over ssh.
    '''
    __salt__['salt.cmd']('file.mkdir', image_store_dir)

    minion_pillar = __salt__['cache.pillar'](tgt=minion)
    if minion_pillar.get(minion, {}).get('use_salt_transport'):
        return move_file_from_minion_cache(minion, filepath, image_store_dir)
    return upload_file_from_minion(minion, minion_ip, filepath, image_store_dir)
   07070100000012000081B400000000000000000000000160C1E96E00000757000000000000000000000000000000000000002A00000000susemanager-sls/modules/runners/mgrk8s.py from salt.exceptions import SaltInvocationError

try:
    from kubernetes import client, config # pylint: disable=import-self
    from kubernetes.config import new_client_from_config
    from kubernetes.client.rest import ApiException
    from urllib3.exceptions import HTTPError
    IS_VALID = True
except ImportError as ex:
    IS_VALID = False


def __virtual__():
    # Only expose this runner when the kubernetes client libs are importable.
    return IS_VALID


def get_all_containers(kubeconfig=None, context=None):
    '''
    Retrieve information about all containers running in a Kubernetes cluster.

    :param kubeconfig: path to kubeconfig file (mandatory)
    :param context: context inside kubeconfig (mandatory)
    :raises SaltInvocationError: when kubeconfig or context is missing
    :return:
    .. code-block:: json
       {
            "containers": [
                {
                    "image_id": "(docker-pullable://)?some/image@sha256:hash....",
                    "image": "myregistry/some/image:v1",
                    "container_id": "(docker|cri-o)://...hash...",
                    "pod_name": "kubernetes-pod",
                    "pod_namespace": "pod-namespace"
                }
            ]
       }
    '''
    if not kubeconfig:
        raise SaltInvocationError('kubeconfig is mandatory')

    if not context:
        raise SaltInvocationError('context is mandatory')

    api_client = new_client_from_config(kubeconfig, context)
    api = client.CoreV1Api(api_client)
    pods = api.list_pod_for_all_namespaces(watch=False)
    output = dict(containers=[])
    for pod in pods.items:
        # container_statuses is None for pods with no started containers
        # (e.g. Pending) — iterate an empty list instead of crashing.
        for container in pod.status.container_statuses or []:
            res_cont = dict()
            res_cont['container_id'] = container.container_id
            res_cont['image'] = container.image
            res_cont['image_id'] = container.image_id
            res_cont['pod_name'] = pod.metadata.name
            res_cont['pod_namespace'] = pod.metadata.namespace
            output['containers'].append(res_cont)

    return output
 07070100000013000081B400000000000000000000000160C1E96E00001119000000000000000000000000000000000000002B00000000susemanager-sls/modules/runners/mgrutil.py    from subprocess import Popen, PIPE
import logging
import stat
import grp
import shlex
import os
import shutil
import salt.utils

log = logging.getLogger(__name__)

GROUP_OWNER = 'susemanager'


def delete_rejected_key(minion):
    '''
    Remove the key of a previously rejected minion from minions_rejected.

    :param minion: the minion id to look for
    :return: map containing returncode and stdout/stderr
    '''
    rejected_dir = "/etc/salt/pki/master/minions_rejected/"
    key_path = os.path.normpath(rejected_dir + minion)
    # Refuse any minion id that escapes the rejected-keys directory
    # (e.g. one containing "../" components).
    if not key_path.startswith(rejected_dir):
        return {"returncode": -1, "stderr": "Unexpected path: " + key_path}
    if not os.path.isfile(key_path):
        # Nothing to delete; report success.
        return {"returncode": 0}
    return _cmd(['rm', key_path])


def ssh_keygen(path):
    '''
    Generate an RSA SSH key pair (with empty passphrase) at the given path.

    :param path: destination path of the private key file
    :return: map containing returncode and stdout/stderr; returncode -1
             with an error message if the key file already exists
    '''
    if os.path.isfile(path):
        return {"returncode": -1, "stderr": "Key file already exists"}
    # -N '' -> empty passphrase, -q -> quiet (no interactive output)
    cmd = ['ssh-keygen', '-N', '', '-f', path, '-t', 'rsa', '-q']
    return _cmd(cmd)


def chain_ssh_cmd(hosts=None, clientkey=None, proxykey=None, user="root", options=None, command=None, outputfile=None):
    '''
    Chain ssh calls over one or more hops to run a command on the last host
    in the chain.

    :param hosts: list of hostnames, in hop order; the command runs on the last
    :param clientkey: identity file used for the first hop
    :param proxykey: identity file used for every subsequent hop
    :param user: remote user name (default "root")
    :param options: dict of ssh options, rendered as ``-o key=value``
    :param command: the command to execute on the final host
    :param outputfile: if given, the command's stdout is also written there
    :return: map containing returncode and stdout/stderr
    '''
    # Render the ssh options once: they are identical for every hop.
    # Previously this crashed with AttributeError when options was left
    # at its None default, and the string was rebuilt inside the loop.
    opts = " ".join(["-o {}={}".format(opt, val) for opt, val in (options or {}).items()])
    cmd = []
    for idx, hostname in enumerate(hosts or []):
        # First hop authenticates with the client key, later hops with
        # the proxy key.
        key = clientkey if idx == 0 else proxykey
        ssh = "/usr/bin/ssh -i {} {} -o User={} {}"\
            .format(key, opts, user, hostname)
        cmd.extend(shlex.split(ssh))
    # The command is passed as a single argument to the innermost ssh.
    cmd.append(command)
    ret = _cmd(cmd)
    if outputfile:
        with open(outputfile, "w") as out:
            out.write(ret["stdout"])
    return ret

def remove_ssh_known_host(user, hostname):
    '''
    Remove a host entry from the given user's SSH known_hosts file by
    delegating to the ``ssh.rm_known_host`` execution module through the
    ``salt.cmd`` runner.

    :param user: user whose known_hosts file is updated
    :param hostname: host entry to remove
    :return: whatever ``ssh.rm_known_host`` returns
    '''
    return __salt__['salt.cmd']('ssh.rm_known_host', user, hostname)


def _cmd(cmd):
    '''
    Run a command and capture its output.

    :param cmd: the command as an argument list
    :return: map containing returncode and stdout/stderr decoded to unicode
    '''
    process = Popen(cmd, stdout=PIPE, stderr=PIPE)
    out, err = process.communicate()
    return {
        "returncode": process.returncode,
        "stdout": salt.utils.stringutils.to_unicode(out),
        "stderr": salt.utils.stringutils.to_unicode(err),
    }


def move_minion_uploaded_files(minion=None, dirtomove=None, basepath=None, actionpath=None):
    '''
    Move files a minion uploaded into the master's cache dir over to the
    action (SCAP store) directory, and give group ownership of the moved
    files to the susemanager group.

    :param minion: minion id whose cached files should be moved
    :param dirtomove: sub-directory below the minion's cached 'files' dir
    :param basepath: base directory of the store
    :param actionpath: path below basepath where the files should end up
    :return: {True: <destination path>} on success,
             {False: <error message>} on failure
    '''
    srcdir = os.path.join(__opts__['cachedir'], "minions", minion, 'files', dirtomove.lstrip('/'))
    scapstorepath = os.path.join(basepath, actionpath)
    susemanager_gid = grp.getgrnam(GROUP_OWNER).gr_gid
    if not os.path.exists(scapstorepath):
        log.debug("Creating action directory: {0}".format(scapstorepath))
        try:
            os.makedirs(scapstorepath)
        except Exception as err:
            log.error('Failed to create dir {0}'.format(scapstorepath), exc_info=True)
            return {False: 'Salt failed to create dir {0}: {1}'.format(scapstorepath, str(err))}
        # change group permissions to rwx and group owner to susemanager
        mode = stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH
        subdirs = actionpath.split('/')
        # Walk every intermediate directory of actionpath and fix its mode
        # and group so the susemanager group can traverse into it.
        for idx in range(1, len(subdirs)):
            # NOTE(review): subdirs[0:idx] is a list, so comparing it with
            # the empty string '' is always True and this guard filters
            # nothing. Presumably it was meant to skip empty path
            # components; confirm the intent before changing it.
            if subdirs[0: idx] != '':
                # ignore errors. If dir has owner != salt then chmod fails but the dir
                # might still have the correct group owner
                try:
                    os.chmod(os.path.join(basepath, *subdirs[0: idx]), mode)
                except OSError:
                    pass
                try:
                    os.chown(os.path.join(basepath, *subdirs[0: idx]), -1, susemanager_gid)
                except OSError:
                    pass

    try:
        # move the files to the scap store dir
        for fl in os.listdir(srcdir):
            shutil.move(os.path.join(srcdir, fl), scapstorepath)
        # change group owner to susemanager
        for fl in os.listdir(scapstorepath):
            os.chown(os.path.join(scapstorepath, fl), -1, susemanager_gid)
    except Exception as err:
        log.error('Salt failed to move {0} -> {1}'.format(srcdir, scapstorepath), exc_info=True)
        return {False: str(err)}
    return {True: scapstorepath}

   07070100000014000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001D00000000susemanager-sls/modules/tops  07070100000015000081B400000000000000000000000160C1E96E000004E4000000000000000000000000000000000000003000000000susemanager-sls/modules/tops/mgr_master_tops.py   # -*- coding: utf-8 -*-
'''
SUSE Manager master_tops module
-------------------------------

This module provides the base states top information from SUSE Manager.

The top information returned by this module is merged by Salt with the
user custom data provided in /srv/salt/top.sls file.

.. code-block:: yaml

    master_tops:
      mgr_master_tops: True
'''

# Import python libs
from __future__ import absolute_import
import logging

# Define the module's virtual name
__virtualname__ = 'mgr_master_tops'

log = logging.getLogger(__name__)

MANAGER_BASE_TOP = [
    "channels",
    "certs",
    "packages",
    "custom",
    "custom_groups",
    "custom_org",
    "formulas",
    "services.salt-minion",
    "services.docker",
    "services.kiwi-image-server"
]


def __virtual__():
    '''
    Ensure the module name.
    '''
    return __virtualname__


def top(**kwargs):
    '''
    Returns the SUSE Manager top state information of a minion
    for the `base` salt environment.
    '''
    env = kwargs['opts'].get('environment') or kwargs['opts'].get('saltenv')
    if env in [None, "base"]:
        log.debug('Loading SUSE Manager TOP state information for the "base" environment')
        return {"base": MANAGER_BASE_TOP}
    return None
07070100000016000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001C00000000susemanager-sls/pillar_data   07070100000017000081B400000000000000000000000160C1E96E000001A2000000000000000000000000000000000000002800000000susemanager-sls/pillar_data/gpgkeys.yml   gpgkeys:
  res6tools:
    name: gpg-pubkey-307e3d54
    file: sle11-gpg-pubkey-307e3d54.key
  res7tools:
    name: gpg-pubkey-39db7c82
    file: sle12-gpg-pubkey-39db7c82.key
  res8tools:
    name: gpg-pubkey-39db7c82
    file: sle12-gpg-pubkey-39db7c82.key
  res:
    name: gpg-pubkey-0182b964
    file: res-gpg-pubkey-0182b964.key
  ubuntutools:
    name: gpg-pubkey-39db7c82
    file: sle12-gpg-pubkey-39db7c82.key
  07070100000018000081B400000000000000000000000160C1E96E0000139D000000000000000000000000000000000000001900000000susemanager-sls/pylintrc  # susemanager-sls package pylint configuration

[MASTER]

# Profiled execution.
profile=no

# Pickle collected data for later comparisons.
persistent=no


[MESSAGES CONTROL]

# Disable the message(s) with the given id(s).


disable=I0011,
	C0302,
	C0111,
	R0801,
	R0902,
	R0903,
	R0904,
	R0912,
	R0913,
	R0914,
	R0915,
	R0921,
	R0922,
	W0142,
	W0403,
	W0603,
	C1001,
	W0121,
	useless-else-on-loop,
	bad-whitespace,
	unpacking-non-sequence,
	superfluous-parens,
	cyclic-import,
	redefined-variable-type,
	no-else-return,

        # Uyuni disabled
	E0203,
	E0611,
	E1101,
	E1102

# list of disabled messages:
#I0011: 62: Locally disabling R0201
#C0302:  1: Too many lines in module (2425)
#C0111:  1: Missing docstring
#R0902: 19:RequestedChannels: Too many instance attributes (9/7)
#R0903:  Too few public methods
#R0904: 26:Transport: Too many public methods (22/20)
#R0912:171:set_slots_from_cert: Too many branches (59/20)
#R0913:101:GETServer.__init__: Too many arguments (11/10)
#R0914:171:set_slots_from_cert: Too many local variables (38/20)
#R0915:171:set_slots_from_cert: Too many statements (169/50)
#W0142:228:MPM_Package.write: Used * or ** magic
#W0403: 28: Relative import 'rhnLog', should be 'backend.common.rhnLog'
#W0603: 72:initLOG: Using the global statement
# for pylint-1.0 we also disable
#C1001: 46, 0: Old-style class defined. (old-style-class)
#W0121: 33,16: Use raise ErrorClass(args) instead of raise ErrorClass, args. (old-raise-syntax)
#W:243, 8: Else clause on loop without a break statement (useless-else-on-loop)
# pylint-1.1 checks
#C:334, 0: No space allowed after bracket (bad-whitespace)
#W:162, 8: Attempting to unpack a non-sequence defined at line 6 of (unpacking-non-sequence)
#C: 37, 0: Unnecessary parens after 'not' keyword (superfluous-parens)
#C:301, 0: Unnecessary parens after 'if' keyword (superfluous-parens)

[REPORTS]

# Set the output format. Available formats are text, parseable, colorized, msvs
# (visual studio) and html
output-format=parseable

# Include message's id in output
include-ids=yes

# Tells whether to display a full report or only the messages
reports=yes

# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
msg-template="{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}"

[VARIABLES]

# A regular expression matching names used for dummy variables (i.e. not used).
dummy-variables-rgx=_|dummy


[BASIC]

# Regular expression which should only match correct module names
#module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
module-rgx=([a-zA-Z_][a-zA-Z0-9_]+)$

# Regular expression which should only match correct module level names
const-rgx=(([a-zA-Z_][a-zA-Z0-9_]*)|(__.*__))$

# Regular expression which should only match correct class names
class-rgx=[a-zA-Z_][a-zA-Z0-9_]+$

# Regular expression which should only match correct function names
function-rgx=[a-z_][a-zA-Z0-9_]{,42}$

# Regular expression which should only match correct method names
method-rgx=[a-z_][a-zA-Z0-9_]{,42}$

# Regular expression which should only match correct instance attribute names
attr-rgx=[a-z_][a-zA-Z0-9_]{,30}$

# Regular expression which should only match correct argument names
argument-rgx=[a-z_][a-zA-Z0-9_]{,30}$

# Regular expression which should only match correct variable names
variable-rgx=[a-z_][a-zA-Z0-9_]{,30}$

# Regular expression which should only match correct list comprehension /
# generator expression variable names
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$

# Regular expression which should only match correct class attribute names
class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,42}|(__.*__))$

# Good variable names which should always be accepted, separated by a comma
good-names=i,j,k,ex,Run,_

# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata

# List of builtins function names that should not be used, separated by a comma
bad-functions=apply,input


[DESIGN]

# Maximum number of arguments for function / method
max-args=10

# Maximum number of locals for function / method body
max-locals=20

# Maximum number of return / yield for function / method body
max-returns=6

# Maximum number of branch for function / method body
max-branchs=20

# Maximum number of statements in function / method body
max-statements=50

# Maximum number of parents for a class (see R0901).
max-parents=7

# Maximum number of attributes for a class (see R0902).
max-attributes=7

# Minimum number of public methods for a class (see R0903).
min-public-methods=1

# Maximum number of public methods for a class (see R0904).
max-public-methods=20


[CLASSES]


[FORMAT]

# Maximum number of characters on a single line.
max-line-length=120

# Maximum number of lines in a module
max-module-lines=1000

# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string='    '


[MISCELLANEOUS]

# List of note tags to take in consideration, separated by a comma.
notes=
   07070100000019000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001800000000susemanager-sls/reactor   0707010000001A000081B400000000000000000000000160C1E96E0000008A000000000000000000000000000000000000003000000000susemanager-sls/reactor/resume_action_chain.sls   resume_actionchain_execution:
  local.mgractionchains.resume:
    - tgt: {{ data['id'] }}
    - metadata:
        suma-action-chain: True
  0707010000001B000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001500000000susemanager-sls/salt  0707010000001C000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000002200000000susemanager-sls/salt/actionchains 0707010000001D000081B400000000000000000000000160C1E96E000002BF000000000000000000000000000000000000003A00000000susemanager-sls/salt/actionchains/force_restart_minion.sh #!/bin/bash
# Force a restart of salt-minion when a just-installed patch did not
# restart the service itself. Heuristic: compute the service start time
# (T0); if the service has been up longer than 5 seconds, it was not
# restarted by the patch installation, so restart it now.
if [ "$(readlink /proc/1/exe)" = "/sbin/init" ]; then
   # SysV, use pid ctime as service start time
   # stat -c '%z' prints the pid file's change time; the sed strips the
   # trailing " +ZZZZ"/" -ZZZZ" timezone offset before date(1) parses it.
   T0=$(date -d "$(stat -c '%z' /var/run/salt-minion.pid | sed -E 's/(.*) (\+|\-)(.*)/\1/g')" "+%s")
   RESTART_MINION="/usr/sbin/rcsalt-minion restart"
else
   # systemd
   # ActiveEnterTimestamp is when the unit last entered the active state.
   TIME=$(systemctl show salt-minion --property=ActiveEnterTimestamp)
   TIME="${TIME//ActiveEnterTimestamp=/}"
   T0=$(date -d "$TIME" '+%s')
   RESTART_MINION="systemctl restart salt-minion"
fi

T1=$(date '+%s')
echo "salt-minion service uptime: $(( T1-T0 )) seconds"
if (( (T1-T0) > 5 )); then
   echo "Patch to update salt-minion was installed but service was not restarted. Forcing restart."
   $RESTART_MINION
fi
 0707010000001E000081B400000000000000000000000160C1E96E00000119000000000000000000000000000000000000003000000000susemanager-sls/salt/actionchains/resumessh.sls   resumessh:
    mgrcompat.module_run:
    -   name: mgractionchains.resume
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_modules
{%- else %}
      - mgrcompat: sync_modules
{%- endif %}

include:
  - util.synccustomall
   0707010000001F000081B400000000000000000000000160C1E96E00000151000000000000000000000000000000000000002F00000000susemanager-sls/salt/actionchains/startssh.sls    startssh:
    mgrcompat.module_run:
    -   name: mgractionchains.start
    -   actionchain_id: {{ pillar.get('actionchain_id')}}
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_modules
{%- else %}
      - mgrcompat: sync_modules
{%- endif %}

include:
  - util.synccustomall
   07070100000020000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000002000000000susemanager-sls/salt/bootloader   07070100000021000081FD00000000000000000000000160C1E96E00000255000000000000000000000000000000000000003900000000susemanager-sls/salt/bootloader/42_uyuni_reinstall.templ  #!/bin/sh
set -e

. "$pkgdatadir/grub-mkconfig_lib"

rel_dirname=`make_system_path_relative_to_its_root /boot`

echo "menuentry \"{{ pillar.get('uyuni-reinstall-name') }}\" {"
if [ -d /sys/firmware/efi ] && [ "x${GRUB_USE_LINUXEFI}" = "xtrue" ]; then
    echo "    linuxefi ${rel_dirname}/uyuni-reinstall-kernel {{ pillar.get('uyuni-reinstall-kopts') }}"
    echo "    initrdefi ${rel_dirname}/uyuni-reinstall-initrd"
else
    echo "    linux ${rel_dirname}/uyuni-reinstall-kernel {{ pillar.get('uyuni-reinstall-kopts') }}"
    echo "    initrd ${rel_dirname}/uyuni-reinstall-initrd"
fi
echo "}"

   07070100000022000081B400000000000000000000000160C1E96E00000ACF000000000000000000000000000000000000003000000000susemanager-sls/salt/bootloader/autoinstall.sls   {% if pillar['uyuni-reinstall-kernel'] and pillar['uyuni-reinstall-initrd'] %}
mgr_copy_kernel:
  file.managed:
    - name: /boot/uyuni-reinstall-kernel
    - source: salt://bootloader/{{ pillar.get('uyuni-reinstall-kernel') }}

mgr_copy_initrd:
  file.managed:
    - name: /boot/uyuni-reinstall-initrd
    - source: salt://bootloader/{{ pillar.get('uyuni-reinstall-initrd') }}

{% set loader_type = salt['cmd.run']('if [ -f /etc/sysconfig/bootloader ]; then source /etc/sysconfig/bootloader 2> /dev/null; fi;
if [ -z "${LOADER_TYPE}" ]; then
if [ $(which grubonce 2> /dev/null) ] && [ !$(which grub2-mkconfig 2> /dev/null) ]; then LOADER_TYPE="grub";
elif [ $(which elilo 2> /dev/null) ] && [ !$(which grub2-mkconfig 2> /dev/null) ]; then LOADER_TYPE="elilo";
fi;
fi; echo "${LOADER_TYPE}"', python_shell=True) %}
{% if loader_type == 'grub' %}
mgr_create_grub_entry:
  file.append:
    - name: /boot/grub/menu.lst
    - template: jinja
    - source: salt://bootloader/grub1_uyuni_reinstall.templ
    - require:
      - file: mgr_copy_kernel
      - file: mgr_copy_initrd

mgr_grub_boot_once:
  cmd.run:
    - name: grubonce "{{ pillar.get('uyuni-reinstall-name') }}"
    - onchanges:
      - file: mgr_create_grub_entry
{% elif loader_type == 'elilo' %}
mgr_create_elilo_entry:
  file.append:
    - name: /etc/elilo.conf
    - template: jinja
    - source: salt://bootloader/elilo_uyuni_reinstall.templ
    - require:
      - file: mgr_copy_kernel
      - file: mgr_copy_initrd

mgr_set_default_boot:
  file.replace:
    - name: /etc/elilo.conf
    - pattern: default = .*
    - repl: default = {{ pillar.get('uyuni-reinstall-name') }}
    - require:
      - file: mgr_create_elilo_entry

mgr_elilo_copy_config:
  cmd.run:
    - name: elilo
    - onchanges:
      - file: mgr_create_elilo_entry
      - file: mgr_set_default_boot
{% else %}
mgr_create_grub2_entry:
  file.managed:
    - name: /etc/grub.d/42_uyuni_reinstall
    - source: salt://bootloader/42_uyuni_reinstall.templ
    - template: jinja
    - mode: 0755

mgr_set_default_boot:
  file.replace:
    - name: /etc/default/grub
    - pattern: GRUB_DEFAULT=.*
    - repl: GRUB_DEFAULT={{ pillar.get('uyuni-reinstall-name') }}
    - require:
      - file: mgr_create_grub2_entry

mgr_generate_grubconf:
  cmd.run:
    - name: grub2-mkconfig -o /boot/grub2/grub.cfg
    - onchanges:
      - file: mgr_copy_kernel
      - file: mgr_copy_initrd
      - file: mgr_create_grub2_entry
      - file: mgr_set_default_boot
{% endif %}

mgr_autoinstall_start:
  cmd.run:
    - name: shutdown -r +1
    - require:
{% if loader_type == 'grub' %}
      - cmd: mgr_grub_boot_once
{% elif loader_type == 'elilo' %}
      - cmd: mgr_elilo_copy_config
{% else %}
      - cmd: mgr_generate_grubconf
{% endif %}

{% endif %}
 07070100000023000081B400000000000000000000000160C1E96E000000DA000000000000000000000000000000000000003C00000000susemanager-sls/salt/bootloader/elilo_uyuni_reinstall.templ   
image = uyuni-reinstall-kernel
###Created for automated reinstallation
    label = {{ pillar.get('uyuni-reinstall-name') }}
    append = "{{ pillar.get('uyuni-reinstall-kopts') }}"
    initrd = uyuni-reinstall-initrd
  07070100000024000081B400000000000000000000000160C1E96E000000CC000000000000000000000000000000000000003C00000000susemanager-sls/salt/bootloader/grub1_uyuni_reinstall.templ   
###Created for automated reinstallation
title {{ pillar.get('uyuni-reinstall-name') }}
kernel /boot/uyuni-reinstall-kernel {{ pillar.get('uyuni-reinstall-kopts') }}
  initrd /boot/uyuni-reinstall-initrd
07070100000025000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001F00000000susemanager-sls/salt/bootstrap    07070100000026000081B400000000000000000000000160C1E96E000001D1000000000000000000000000000000000000002E00000000susemanager-sls/salt/bootstrap/bootstrap.repo # SUSE Manager bootstrap repository
# Do not edit this file, changes will be overwritten
{%- if grains['os_family'] == 'Debian' %}
deb [trusted=yes] {{bootstrap_repo_url}} bootstrap main
{%- else %}
[SUSE-Manager-Bootstrap]
name=SUSE-Manager-Bootstrap
type=rpm-md
baseurl={{bootstrap_repo_url}}
gpgcheck=0
enabled=1
autorefresh=1
keeppackages=0
{%- if grains['os_family'] == 'RedHat' and grains['osmajorrelease'] >= 8 %}
module_hotfixes=1
{%- endif %}
{%- endif %}
   07070100000027000081B400000000000000000000000160C1E96E00001BDC000000000000000000000000000000000000002800000000susemanager-sls/salt/bootstrap/init.sls   # Make sure no SUSE Manager server aliasing left over from ssh-push via tunnel
mgr_server_localhost_alias_absent:
  host.absent:
    - ip:
      - 127.0.0.1
    - names:
      - {{ salt['pillar.get']('mgr_server') }}

# disable all susemanager:* repos
{% set repos_disabled = {'match_str': 'susemanager:', 'matching': true} %}
{%- include 'channels/disablelocalrepos.sls' %}

{% set os_base = 'sle' %}
# CentOS6 oscodename is bogus
{%- if "centos" in grains['os']|lower %}
{% set os_base = 'centos' %}
{%- elif "redhat" in grains['os']|lower %}
{% set os_base = 'res' %}
{%- elif "opensuse" in grains['oscodename']|lower %}
{% set os_base = 'opensuse' %}
{%- endif %}

{%- if grains['os_family'] == 'Suse' %}
{%- if "." in grains['osrelease'] %}
{% set bootstrap_repo_url = 'https://' ~ salt['pillar.get']('mgr_server') ~ '/pub/repositories/' ~ os_base ~ '/' ~ grains['osrelease'].replace('.', '/') ~ '/bootstrap/' %}
{%- else %}
{% set bootstrap_repo_url = 'https://' ~ salt['pillar.get']('mgr_server') ~ '/pub/repositories/' ~ os_base ~ '/' ~ grains['osrelease'] ~ '/0/bootstrap/' %}
{%- endif %}

{%- elif grains['os_family'] == 'RedHat' %}
{%- if salt['file.file_exists']('/etc/oracle-release') %}
{% set bootstrap_repo_url = 'https://' ~ salt['pillar.get']('mgr_server') ~ '/pub/repositories/oracle/' ~ grains['osmajorrelease'] ~ '/bootstrap/' %}

{%- elif salt['file.file_exists']('/usr/share/doc/sles_es-release') %}
{% set bootstrap_repo_url = 'https://' ~ salt['pillar.get']('mgr_server') ~ '/pub/repositories/res/' ~ grains['osmajorrelease'] ~ '/bootstrap/' %}

{%- elif salt['file.file_exists']('/etc/centos-release') %}
{# We try the CentOS bootstrap repository first; if it is not available, fall back to RES #}
{% set bootstrap_repo_url = 'https://' ~ salt['pillar.get']('mgr_server') ~ '/pub/repositories/centos/' ~ grains['osmajorrelease'] ~ '/bootstrap/' %}
{% set bootstrap_repo_request = salt['http.query'](bootstrap_repo_url + 'repodata/repomd.xml', status=True, verify_ssl=False) %}
{%- if bootstrap_repo_request['status'] == 901 %}
{{ raise(bootstrap_repo_request['error']) }}
{%- elif not (0 < bootstrap_repo_request['status'] < 300) %}
{% set bootstrap_repo_url = 'https://' ~ salt['pillar.get']('mgr_server') ~ '/pub/repositories/res/' ~ grains['osmajorrelease'] ~ '/bootstrap/' %}
{%- endif %}

{%- elif salt['file.file_exists']('/etc/redhat-release') %}
{% set bootstrap_repo_url = 'https://' ~ salt['pillar.get']('mgr_server') ~ '/pub/repositories/res/' ~ grains['osmajorrelease'] ~ '/bootstrap/' %}

{%- else %}
{% set bootstrap_repo_url = 'https://' ~ salt['pillar.get']('mgr_server') ~ '/pub/repositories/' ~ os_base ~ '/' ~ grains['osmajorrelease'] ~ '/bootstrap/' %}
{%- endif %}

{%- elif grains['os_family'] == 'Debian' %}
{%- set osrelease = grains['osrelease'].split('.') %}
{%- if grains['os'] == 'Ubuntu' %}
{% set bootstrap_repo_url = 'https://' ~ salt['pillar.get']('mgr_server') ~ '/pub/repositories/ubuntu/' ~ osrelease[0] ~ '/' ~ osrelease[1].lstrip('0') ~ '/bootstrap/' %}
{%- elif grains['os'] == 'AstraLinuxCE' %}
{% set bootstrap_repo_url = 'https://' ~ salt['pillar.get']('mgr_server') ~ '/pub/repositories/astra/' ~ grains['oscodename'] ~ '/bootstrap/' %}
{%- else %}
{% set bootstrap_repo_url = 'https://' ~ salt['pillar.get']('mgr_server') ~ '/pub/repositories/debian/' ~ grains['osmajorrelease'] ~ '/bootstrap/' %}
{%- endif %}
{%- endif %}

{%- if not grains['os_family'] == 'Debian' %}

{%- set bootstrap_repo_request = salt['http.query'](bootstrap_repo_url + 'repodata/repomd.xml', status=True, verify_ssl=False) %}
{# 901 is a special status code for the TLS issue with RHEL6 and SLE11. #}
{%- if bootstrap_repo_request['status'] == 901 %}
{{ raise(bootstrap_repo_request['error']) }}
{%- endif %}
{%- set bootstrap_repo_exists = (0 < bootstrap_repo_request['status'] < 300) %}

bootstrap_repo:
  file.managed:
{%- if grains['os_family'] == 'Suse' %}
    - name: /etc/zypp/repos.d/susemanager:bootstrap.repo
{%- elif grains['os_family'] == 'RedHat' %}
    - name: /etc/yum.repos.d/susemanager:bootstrap.repo
{%- endif %}
    - source:
      - salt://bootstrap/bootstrap.repo
    - template: jinja
    - context:
      bootstrap_repo_url: {{bootstrap_repo_url}}
    - mode: 644
    - require:
      - host: mgr_server_localhost_alias_absent
{%- if repos_disabled.count > 0 %}
      - mgrcompat: disable_repo_*
{%- endif %}
    - onlyif:
      - ([ {{ bootstrap_repo_exists }} = "True" ])

{%- else %}
{%- set bootstrap_repo_exists = (0 < salt['http.query'](bootstrap_repo_url + 'dists/bootstrap/Release', status=True, verify_ssl=False)['status'] < 300) %}
bootstrap_repo:
  file.managed:
    - name: /etc/apt/sources.list.d/susemanager_bootstrap.list
    - source:
      - salt://bootstrap/bootstrap.repo
    - template: jinja
    - context:
      bootstrap_repo_url: {{bootstrap_repo_url}}
    - mode: 644
    - require:
      - host: mgr_server_localhost_alias_absent
{%- if repos_disabled.count > 0 %}
      - mgrcompat: disable_repo_*
{%- endif %}
    - onlyif:
      - ([ {{ bootstrap_repo_exists }} = "True" ])
{%- endif %}

{% include 'channels/gpg-keys.sls' %}

salt-minion-package:
  pkg.installed:
    - name: salt-minion
    - install_recommends: False
    - require:
      - file: bootstrap_repo

/etc/salt/minion.d/susemanager.conf:
  file.managed:
    - source:
      - salt://bootstrap/susemanager.conf
    - template: jinja
    - mode: 644
    - require:
      - pkg: salt-minion-package

/etc/salt/minion_id:
  file.managed:
    - contents_pillar: minion_id
    - require:
      - pkg: salt-minion-package

{% include 'bootstrap/remove_traditional_stack.sls' %}

mgr_update_basic_pkgs:
  pkg.latest:
    - pkgs:
      - openssl
{%- if grains['os_family'] == 'Suse' and grains['osrelease'] in ['11.3', '11.4'] and grains['cpuarch'] in ['i586', 'x86_64'] %}
      - pmtools
{%- elif grains['cpuarch'] in ['aarch64', 'x86_64'] %}
      - dmidecode
{%- endif %}
{%- if grains['os_family'] == 'Suse' %}
      - zypper
{%- elif grains['os_family'] == 'RedHat' %}
      - yum
{%- endif %}

# Manage minion key files in case they are provided in the pillar
{% if pillar['minion_pub'] is defined and pillar['minion_pem'] is defined %}
/etc/salt/pki/minion/minion.pub:
  file.managed:
    - contents_pillar: minion_pub
    - mode: 644
    - makedirs: True
    - require:
      - pkg: salt-minion-package

/etc/salt/pki/minion/minion.pem:
  file.managed:
    - contents_pillar: minion_pem
    - mode: 400
    - makedirs: True
    - require:
      - pkg: salt-minion-package

salt-minion:
  service.running:
    - enable: True
    - require:
      - pkg: salt-minion-package
      - host: mgr_server_localhost_alias_absent
    - watch:
      - file: /etc/salt/minion_id
      - file: /etc/salt/pki/minion/minion.pem
      - file: /etc/salt/pki/minion/minion.pub
      - file: /etc/salt/minion.d/susemanager.conf
{% else %}
salt-minion:
  service.running:
    - enable: True
    - require:
      - pkg: salt-minion-package
      - host: mgr_server_localhost_alias_absent
    - watch:
      - file: /etc/salt/minion_id
      - file: /etc/salt/minion.d/susemanager.conf
{% endif %}
07070100000028000081B400000000000000000000000160C1E96E000007A8000000000000000000000000000000000000003C00000000susemanager-sls/salt/bootstrap/remove_traditional_stack.sls   # disable all spacewalk:* repos
{% set repos_disabled = {'match_str': 'spacewalk:', 'matching': true} %}
{%- include 'channels/disablelocalrepos.sls' %}

include:
  - util.syncstates

disable_spacewalksd:
  service.dead:
    - name: rhnsd
    - enable: False

disable_spacewalk-update-status:
  service.dead:
    - name: spacewalk-update-status
    - enable: False

disable_osad:
  service.dead:
    - name: osad
    - enable: False

remove_traditional_stack_all:
  pkg.removed:
    - pkgs:
      - spacewalk-check
      - spacewalk-client-setup
      - osad
      - osa-common
      - mgr-osad
      - spacewalksd
      - mgr-daemon
      - rhnlib
      - rhnmd
{%- if grains['os_family'] == 'Suse' %}
      - zypp-plugin-spacewalk
{%- elif grains['os_family'] == 'RedHat' %}
      - yum-rhn-plugin
      - rhnsd
      - rhn-check
      - rhn-setup
      - rhn-client-tools
{%- elif grains['os_family'] == 'Debian' %}
      - apt-transport-spacewalk
{%- endif %}
{%- if repos_disabled.count > 0 %}
    - require:
      - mgrcompat: disable_repo*
{%- endif %}

remove_traditional_stack:
  pkg.removed:
    - pkgs:
      - spacewalk-client-tools
      - rhncfg
      - mgr-cfg
{%- if grains['os_family'] == 'Suse' %}
      - suseRegisterInfo
{%- endif %}
{%- if repos_disabled.count > 0 %}
    - require:
      - mgrcompat: disable_repo*
{%- endif %}
    - unless: rpm -q spacewalk-proxy-common || rpm -q spacewalk-common

# only removing apt-transport-spacewalk above
# causes apt-get update to 'freeze' if this
# file is still present and referencing a
# method not present anymore.
{%- if grains['os_family'] == 'Debian' %}
remove_spacewalk_sources:
  file.absent:
    - name: /etc/apt/sources.list.d/spacewalk.list
{%- endif %}

# Remove suseRegisterInfo in a separate yum transaction to avoid being called by
# the yum plugin.
{%- if grains['os_family'] == 'RedHat' %}
remove_suse_register_info_rh:
  pkg.removed:
    - name: suseRegisterInfo
{%- endif %}
07070100000029000081B400000000000000000000000160C1E96E000001EE000000000000000000000000000000000000003000000000susemanager-sls/salt/bootstrap/susemanager.conf   # This file was generated by SUSE Manager
master: {{ pillar['mgr_server'] }}
server_id_use_crc: adler32
enable_legacy_startup_events: False
enable_fqdns_grains: False
{% if pillar['activation_key'] is defined %}
grains:
  susemanager:
    activation_key: {{ pillar['activation_key'] }}
{% endif %}
start_event_grains:
  - machine_id
  - saltboot_initrd
  - susemanager

# Define SALT_RUNNING env variable for pkg modules
system-environment:
  modules:
    pkg:
      _:
        SALT_RUNNING: 1
  0707010000002A000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001B00000000susemanager-sls/salt/certs    0707010000002B0000A1FF00000000000000000000000160C1E96E0000000B000000000000000000000000000000000000003000000000susemanager-sls/salt/certs/AstraLinuxCEorel.sls   Debian9.sls 0707010000002C0000A1FF00000000000000000000000160C1E96E0000000A000000000000000000000000000000000000002600000000susemanager-sls/salt/certs/CAASP1.sls SLES12.sls  0707010000002D0000A1FF00000000000000000000000160C1E96E0000000B000000000000000000000000000000000000002700000000susemanager-sls/salt/certs/CentOS6.sls    RedHat6.sls 0707010000002E0000A1FF00000000000000000000000160C1E96E0000000B000000000000000000000000000000000000002700000000susemanager-sls/salt/certs/CentOS7.sls    RedHat7.sls 0707010000002F0000A1FF00000000000000000000000160C1E96E0000000B000000000000000000000000000000000000002700000000susemanager-sls/salt/certs/CentOS8.sls    RedHat7.sls 070701000000300000A1FF00000000000000000000000160C1E96E0000000B000000000000000000000000000000000000002800000000susemanager-sls/salt/certs/Debian10.sls   Debian9.sls 07070100000031000081B400000000000000000000000160C1E96E00000197000000000000000000000000000000000000002700000000susemanager-sls/salt/certs/Debian9.sls    mgr_download_mgr_cert:
  file.managed:
    - name: /usr/local/share/ca-certificates/susemanager/RHN-ORG-TRUSTED-SSL-CERT.crt
    - makedirs: True
    - source:
      - salt://certs/RHN-ORG-TRUSTED-SSL-CERT

mgr_update_ca_certs:
  cmd.run:
    - name: /usr/sbin/update-ca-certificates
    - runas: root
    - onchanges:
      - file: /usr/local/share/ca-certificates/susemanager/RHN-ORG-TRUSTED-SSL-CERT.crt
 070701000000320000A1FF00000000000000000000000160C1E96E0000000A000000000000000000000000000000000000002800000000susemanager-sls/salt/certs/Leap15_0.sls   SLES12.sls  070701000000330000A1FF00000000000000000000000160C1E96E0000000A000000000000000000000000000000000000002800000000susemanager-sls/salt/certs/Leap15_1.sls   SLES12.sls  070701000000340000A1FF00000000000000000000000160C1E96E0000000A000000000000000000000000000000000000002800000000susemanager-sls/salt/certs/Leap15_2.sls   SLES12.sls  070701000000350000A1FF00000000000000000000000160C1E96E0000000A000000000000000000000000000000000000002800000000susemanager-sls/salt/certs/Leap15_3.sls   SLES12.sls  070701000000360000A1FF00000000000000000000000160C1E96E0000000A000000000000000000000000000000000000002800000000susemanager-sls/salt/certs/Leap42_2.sls   SLES12.sls  070701000000370000A1FF00000000000000000000000160C1E96E0000000A000000000000000000000000000000000000002800000000susemanager-sls/salt/certs/Leap42_3.sls   SLES12.sls  070701000000380000A1FF00000000000000000000000160C1E96E0000000B000000000000000000000000000000000000002400000000susemanager-sls/salt/certs/OEL6.sls   RedHat6.sls 070701000000390000A1FF00000000000000000000000160C1E96E0000000B000000000000000000000000000000000000002400000000susemanager-sls/salt/certs/OEL7.sls   RedHat7.sls 0707010000003A0000A1FF00000000000000000000000160C1E96E0000000B000000000000000000000000000000000000002400000000susemanager-sls/salt/certs/OEL8.sls   RedHat7.sls 0707010000003B0000A1FF00000000000000000000000160C1E96E0000000B000000000000000000000000000000000000002400000000susemanager-sls/salt/certs/RES6.sls   RedHat6.sls 0707010000003C0000A1FF00000000000000000000000160C1E96E0000000B000000000000000000000000000000000000002400000000susemanager-sls/salt/certs/RES7.sls   RedHat7.sls 0707010000003D0000A1FF00000000000000000000000160C1E96E0000000B000000000000000000000000000000000000002400000000susemanager-sls/salt/certs/RES8.sls   RedHat7.sls 0707010000003E000081B400000000000000000000000160C1E96E0
000021D000000000000000000000000000000000000002700000000susemanager-sls/salt/certs/RedHat6.sls    enable_ca_store:
  cmd.run:
    - name: /usr/bin/update-ca-trust enable
    - runas: root
    - unless: "/usr/bin/update-ca-trust check | grep \"PEM/JAVA Status: ENABLED\""

/etc/pki/ca-trust/source/anchors/RHN-ORG-TRUSTED-SSL-CERT:
  file.managed:
    - source:
      - salt://certs/RHN-ORG-TRUSTED-SSL-CERT
    - require:
      - cmd: enable_ca_store

update-ca-certificates:
  cmd.run:
    - name: /usr/bin/update-ca-trust extract
    - runas: root
    - onchanges:
      - file: /etc/pki/ca-trust/source/anchors/RHN-ORG-TRUSTED-SSL-CERT
   0707010000003F000081B400000000000000000000000160C1E96E00000143000000000000000000000000000000000000002700000000susemanager-sls/salt/certs/RedHat7.sls    /etc/pki/ca-trust/source/anchors/RHN-ORG-TRUSTED-SSL-CERT:
  file.managed:
    - source:
      - salt://certs/RHN-ORG-TRUSTED-SSL-CERT

update-ca-certificates:
  cmd.run:
    - name: /usr/bin/update-ca-trust extract
    - runas: root
    - onchanges:
      - file: /etc/pki/ca-trust/source/anchors/RHN-ORG-TRUSTED-SSL-CERT
 070701000000400000A1FF00000000000000000000000160C1E96E0000000B000000000000000000000000000000000000002700000000susemanager-sls/salt/certs/RedHat8.sls    RedHat7.sls 070701000000410000A1FF00000000000000000000000160C1E96E0000000A000000000000000000000000000000000000002600000000susemanager-sls/salt/certs/SLED15.sls SLES12.sls  07070100000042000081B400000000000000000000000160C1E96E00000181000000000000000000000000000000000000002600000000susemanager-sls/salt/certs/SLES11.sls /etc/ssl/certs/RHN-ORG-TRUSTED-SSL-CERT.pem:
  file.managed:
    - source:
      - salt://certs/RHN-ORG-TRUSTED-SSL-CERT

salt://certs/update-multi-cert.sh:
  cmd.wait_script:
    - runas: root
    - watch:
        - file: /etc/ssl/certs/RHN-ORG-TRUSTED-SSL-CERT.pem

c_rehash:
  cmd.run:
    - name: /usr/bin/c_rehash
    - runas: root
    - onchanges:
      - file: /etc/ssl/certs/*
   07070100000043000081B400000000000000000000000160C1E96E0000012F000000000000000000000000000000000000002600000000susemanager-sls/salt/certs/SLES12.sls /etc/pki/trust/anchors/RHN-ORG-TRUSTED-SSL-CERT:
  file.managed:
    - source:
      - salt://certs/RHN-ORG-TRUSTED-SSL-CERT

update-ca-certificates:
  cmd.run:
    - name: /usr/sbin/update-ca-certificates
    - runas: root
    - onchanges:
      - file: /etc/pki/trust/anchors/RHN-ORG-TRUSTED-SSL-CERT
 070701000000440000A1FF00000000000000000000000160C1E96E0000000A000000000000000000000000000000000000002600000000susemanager-sls/salt/certs/SLES15.sls SLES12.sls  070701000000450000A1FF00000000000000000000000160C1E96E0000000A000000000000000000000000000000000000002A00000000susemanager-sls/salt/certs/SLES_SAP12.sls SLES12.sls  070701000000460000A1FF00000000000000000000000160C1E96E0000000A000000000000000000000000000000000000002600000000susemanager-sls/salt/certs/SUSE12.sls SLES12.sls  070701000000470000A1FF00000000000000000000000160C1E96E0000000A000000000000000000000000000000000000002600000000susemanager-sls/salt/certs/SUSE15.sls SLES12.sls  070701000000480000A1FF00000000000000000000000160C1E96E0000000A000000000000000000000000000000000000002A00000000susemanager-sls/salt/certs/Tumbleweed.sls SLES12.sls  070701000000490000A1FF00000000000000000000000160C1E96E0000000B000000000000000000000000000000000000002800000000susemanager-sls/salt/certs/Ubuntu16.sls   Debian9.sls 0707010000004A0000A1FF00000000000000000000000160C1E96E0000000B000000000000000000000000000000000000002800000000susemanager-sls/salt/certs/Ubuntu18.sls   Debian9.sls 0707010000004B0000A1FF00000000000000000000000160C1E96E0000000C000000000000000000000000000000000000002800000000susemanager-sls/salt/certs/Ubuntu20.sls   Debian10.sls0707010000004C000081B400000000000000000000000160C1E96E000003F2000000000000000000000000000000000000002400000000susemanager-sls/salt/certs/init.sls   {% macro includesls(osfullname, osrelease) -%}
{% include 'certs/{0}.sls'.format(osfullname + osrelease.replace('.', '_')) ignore missing -%}
{%- endmacro %}
{% if grains['os_family'] == 'Suse' %}
{% if grains['osfullname'] == 'openSUSE Tumbleweed' %}
{% set sls = includesls('Tumbleweed', '') -%}
{% else -%}
{% set sls = includesls(grains['osfullname'], grains['osrelease']) -%}
{% endif -%}
{% if sls|trim != "" -%}
{{ sls }}
{% else -%}
{{ includesls(grains['osfullname'], grains['osrelease_info']|first|string) }}
{% endif -%}
{% elif grains['os_family'] == 'RedHat' %}
{% set sls = includesls(grains['os'], grains['osrelease']) -%}
{% if sls|trim != "" -%}
{{ sls }}
{% else -%}
{{ includesls(grains['os'], grains['osrelease_info']|first|string) }}
{% endif -%}
{% elif grains['os_family'] == 'Debian' %}
{% if grains ['os'] == 'AstraLinuxCE' %}
{{ includesls(grains['os'], grains['oscodename']) }}
{% else %}
{{ includesls(grains['os'], grains['osrelease_info']|first|string) }}
{% endif %}
{% endif %}
  0707010000004D000081B400000000000000000000000160C1E96E0000018B000000000000000000000000000000000000003000000000susemanager-sls/salt/certs/update-multi-cert.sh   CERT_DIR=/etc/ssl/certs
CERT_FILE=RHN-ORG-TRUSTED-SSL-CERT
TRUST_DIR=/etc/ssl/certs
rm -f $TRUST_DIR/${CERT_FILE}-*.pem
if [ -f $CERT_DIR/${CERT_FILE}.pem ]; then
    if [ $(grep -- "-----BEGIN CERTIFICATE-----" $CERT_DIR/${CERT_FILE}.pem | wc -l) -gt 1 ]; then
        csplit -b "%02d.pem" -f $TRUST_DIR/${CERT_FILE}- $CERT_DIR/${CERT_FILE}.pem '/-----BEGIN CERTIFICATE-----/' '{*}'
    fi
fi

 0707010000004E000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001E00000000susemanager-sls/salt/channels 0707010000004F000081B400000000000000000000000160C1E96E00000288000000000000000000000000000000000000002B00000000susemanager-sls/salt/channels/aptauth.conf    # susemanager.conf managed by SUSE Manager
# Do not edit this file, changes will be overwritten
#
{% for chan, args in pillar.get(pillar.get('_mgr_channels_items_name', 'channels'), {})|dictsort|reverse %}
{%- set protocol = salt['pillar.get']('pkg_download_point_protocol', 'https')%}
{%- set hostname = salt['pillar.get']('pkg_download_point_host', args['host'])%}
{%- set port = salt['pillar.get']('pkg_download_point_port', args.get('port', 443))%}
machine {{hostname}}:{{port}}/rhn/manager/download/dists/{{ chan }} login {{ args['token'] }}
machine {{hostname}}:{{port}}/rhn/manager/download/{{ chan }} login {{ args['token'] }}
{% endfor %}
07070100000050000081B400000000000000000000000160C1E96E00000848000000000000000000000000000000000000002C00000000susemanager-sls/salt/channels/channels.repo   # Channels managed by SUSE Manager
# Do not edit this file, changes will be overwritten
#
{% for chan, args in pillar.get(pillar.get('_mgr_channels_items_name', 'channels'), {}).items() %}
{%- set protocol = salt['pillar.get']('pkg_download_point_protocol', 'https')%}
{%- set hostname = salt['pillar.get']('pkg_download_point_host', args['host'])%}
{%- set port = salt['pillar.get']('pkg_download_point_port', args.get('port', 443))%}
{%- if grains['os'] == 'Debian' or grains['os'] == 'Ubuntu' %}
{%- set apt_version = salt['pkg.version']("apt") %}
{%- set apt_support_acd = grains['os_family'] == 'Debian' and apt_version and salt['pkg.version_cmp'](apt_version, "1.6.10") > 0 %}

{%- if apt_support_acd %}
deb {{ '[trusted=yes]' if not pillar.get('mgr_metadata_signing_enabled', false) else '[signed-by=/usr/share/keyrings/mgr-archive-keyring.gpg]' }} {{protocol}}://{{hostname}}:{{port}}/rhn/manager/download {{ chan }} main
{%- else %}
deb {{ '[trusted=yes]' if not pillar.get('mgr_metadata_signing_enabled', false) else '[signed-by=/usr/share/keyrings/mgr-archive-keyring.gpg]' }} {{protocol}}://{{ args['token'] }}@{{hostname}}:{{port}}/rhn/manager/download {{ chan }} main
{%- endif %}
{%- else %}
[{{ args['alias'] }}]
name={{ args['name'] }}
enabled={{ args['enabled'] }}
{%- if grains['os_family'] == 'RedHat' %}
baseurl={{protocol}}://{{hostname}}:{{port}}/rhn/manager/download/{{ chan }}
susemanager_token={{ args['token'] }}
gpgcheck={{ 1 if args['gpgcheck'] == "1" or args['pkg_gpgcheck'] != "0" else 0 }}
repo_gpgcheck={{ args['gpgcheck'] }}
{%- if salt['pillar.get']('mgr_metadata_signing_enabled', false) %}
gpgkey=https://{{ args['host'] }}/pub/mgr-gpg-pub.key
{%- endif %}
{%- if grains['osmajorrelease'] >= 8 and args['cloned_nonmodular'] %}
module_hotfixes=1
{%- endif %}
{%- else %}
autorefresh={{ args['autorefresh'] }}
baseurl={{protocol}}://{{hostname}}:{{port}}/rhn/manager/download/{{ chan }}?{{ args['token'] }}
gpgcheck={{ args['gpgcheck'] }}
repo_gpgcheck={{ args['repo_gpgcheck'] }}
pkg_gpgcheck={{ args['pkg_gpgcheck'] }}
{%- endif %}
type={{ args['type'] }}
{%- endif %}

{% endfor %}
07070100000051000081B400000000000000000000000160C1E96E0000059D000000000000000000000000000000000000003400000000susemanager-sls/salt/channels/disablelocalrepos.sls   # Disable all local repos matching or not matching the 'match_str'
# Default arguments: everything except *susemanager:*
{% if not repos_disabled is defined %}
{% set repos_disabled = {'match_str': 'susemanager:', 'matching': false} %}
{% endif %}
{% do repos_disabled.update({'count': 0}) %}

{% if salt['config.get']('disable_local_repos', True) %}
{% set repos = salt['pkg.list_repos']() %}
{% for alias, data in repos.items() %}
{% if grains['os_family'] == 'Debian' %}
{% for entry in data %}
{% if (repos_disabled.match_str in entry['file'])|string == repos_disabled.matching|string and entry.get('enabled', True) %} 
disable_repo_{{ repos_disabled.count }}:
  mgrcompat.module_run:
    - name: pkg.mod_repo
    - repo: {{ "'" ~ entry.line ~ "'" }}
    - kwargs:
        disabled: True
{% do repos_disabled.update({'count': repos_disabled.count + 1}) %}
{% endif %}
{% endfor %}
{% else %}
{% if (repos_disabled.match_str in alias)|string == repos_disabled.matching|string and data.get('enabled', True) %}
disable_repo_{{ alias }}:
  mgrcompat.module_run:
    - name: pkg.mod_repo
    - repo: {{ alias }}
    - kwargs:
        enabled: False
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
{% do repos_disabled.update({'count': repos_disabled.count + 1}) %}
{% endif %}
{% endif %}
{% endfor %}
{% endif %}
   07070100000052000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000003500000000susemanager-sls/salt/channels/dnf-susemanager-plugin  07070100000053000081B400000000000000000000000160C1E96E00000011000000000000000000000000000000000000004C00000000susemanager-sls/salt/channels/dnf-susemanager-plugin/susemanagerplugin.conf   [main]
enabled=1
   07070100000054000081B400000000000000000000000160C1E96E000001E0000000000000000000000000000000000000004A00000000susemanager-sls/salt/channels/dnf-susemanager-plugin/susemanagerplugin.py import dnf

class Susemanager(dnf.Plugin):

    name = 'susemanager'

    def __init__(self, base, cli):
        super(Susemanager, self).__init__(base, cli)

    def config(self):
        for repo in self.base.repos.get_matching("susemanager:*"):
            try:
                susemanager_token = repo.cfg.getValue(section=repo.id, key="susemanager_token")
                repo.set_http_headers(["X-Mgr-Auth: %s" % susemanager_token])
            except:
                pass
07070100000055000081B400000000000000000000000160C1E96E00000775000000000000000000000000000000000000002B00000000susemanager-sls/salt/channels/gpg-keys.sls    
{%- if salt['pillar.get']('mgr_metadata_signing_enabled', false) %}
{%- if grains['os_family'] == 'Debian' %}
mgr_debian_repo_keyring:
  file.managed:
    - name: /usr/share/keyrings/mgr-archive-keyring.gpg
    - source: salt://gpg/mgr-keyring.gpg
    - mode: 644
{% else %}
mgr_trust_customer_gpg_key:
  cmd.run:
    - name: rpm --import https://{{ salt['pillar.get']('mgr_server') }}/pub/mgr-gpg-pub.key
    - runas: root
{%- endif %}
{%- endif %}

{%- if grains['os_family'] == 'RedHat' %}
trust_res_gpg_key:
  cmd.run:
    - name: rpm --import https://{{ salt['pillar.get']('mgr_server') }}/pub/{{ salt['pillar.get']('gpgkeys:res:file') }}
    - unless: rpm -q {{ salt['pillar.get']('gpgkeys:res:name') }}
    - runas: root

trust_suse_manager_tools_rhel_gpg_key:
  cmd.run:
{%- if grains['osmajorrelease']|int == 6 %}
    - name: rpm --import https://{{ salt['pillar.get']('mgr_server') }}/pub/{{ salt['pillar.get']('gpgkeys:res6tools:file') }}
    - unless: rpm -q {{ salt['pillar.get']('gpgkeys:res6tools:name') }}
{%- elif grains['osmajorrelease']|int == 7 %}
    - name: rpm --import https://{{ salt['pillar.get']('mgr_server') }}/pub/{{ salt['pillar.get']('gpgkeys:res7tools:file') }}
    - unless: rpm -q {{ salt['pillar.get']('gpgkeys:res7tools:name') }}
{%- elif grains['osmajorrelease']|int == 8 %}
    - name: rpm --import https://{{ salt['pillar.get']('mgr_server') }}/pub/{{ salt['pillar.get']('gpgkeys:res8tools:file') }}
    - unless: rpm -q {{ salt['pillar.get']('gpgkeys:res8tools:name') }}
{% else %}
    - name: /usr/bin/true
{%- endif %}
    - runas: root

{%- elif grains['os_family'] == 'Debian' %}
install_gnupg_debian:
  pkg.latest:
    - pkgs:
      - gnupg

trust_suse_manager_tools_deb_gpg_key:
  mgrcompat.module_run:
    - name: pkg.add_repo_key
    - path: https://{{ salt['pillar.get']('mgr_server') }}/pub/{{ salt['pillar.get']('gpgkeys:ubuntutools:file') }}
{%- endif %}
   07070100000056000081B400000000000000000000000160C1E96E00000E46000000000000000000000000000000000000002700000000susemanager-sls/salt/channels/init.sls    {%- if grains['os_family'] == 'RedHat' %}

{%- set yum_version = salt['pkg.version']("yum") %}
{%- set is_yum = yum_version and salt['pkg.version_cmp'](yum_version, "4") < 0 %}
{%- set is_dnf = salt['pkg.version']("dnf") %}

{%- if is_dnf %}
mgrchannels_susemanagerplugin_dnf:
  file.managed:
    - name: /usr/lib/python{{ grains['pythonversion'][0] }}.{{ grains['pythonversion'][1] }}/site-packages/dnf-plugins/susemanagerplugin.py
    - source:
      - salt://channels/dnf-susemanager-plugin/susemanagerplugin.py
    - user: root
    - group: root
    - mode: 644

mgrchannels_susemanagerplugin_conf_dnf:
  file.managed:
    - name: /etc/dnf/plugins/susemanagerplugin.conf
    - source:
      - salt://channels/dnf-susemanager-plugin/susemanagerplugin.conf
    - user: root
    - group: root
    - mode: 644

mgrchannels_enable_dnf_plugins:
  file.replace:
    - name: /etc/dnf/dnf.conf
    - pattern: plugins=.*
    - repl: plugins=1
{#- default is '1' when option is not specififed #}
    - onlyif: grep -e 'plugins=0' -e 'plugins=False' -e 'plugins=no' /etc/dnf/dnf.conf
{%- endif %}

{%- if is_yum %}
mgrchannels_susemanagerplugin_yum:
  file.managed:
    - name: /usr/share/yum-plugins/susemanagerplugin.py
    - source:
      - salt://channels/yum-susemanager-plugin/susemanagerplugin.py
    - user: root
    - group: root
    - mode: 644

mgrchannels_susemanagerplugin_conf_yum:
  file.managed:
    - name: /etc/yum/pluginconf.d/susemanagerplugin.conf
    - source:
      - salt://channels/yum-susemanager-plugin/susemanagerplugin.conf
    - user: root
    - group: root
    - mode: 644

mgrchannels_enable_yum_plugins:
  file.replace:
    - name: /etc/yum.conf
    - pattern: plugins=.*
    - repl: plugins=1
    - onlyif: grep plugins=0 /etc/yum.conf

{%- endif %}
{%- endif %}

mgrchannels_repo:
  file.managed:
{%- if grains['os_family'] == 'Suse' %}
    - name: "/etc/zypp/repos.d/susemanager:channels.repo"
{%- elif grains['os_family'] == 'RedHat' %}
    - name: "/etc/yum.repos.d/susemanager:channels.repo"
{%- elif grains['os_family'] == 'Debian' %}
    - name: "/etc/apt/sources.list.d/susemanager:channels.list"
{%- endif %}
    - source:
      - salt://channels/channels.repo
    - template: jinja
    - user: root
    - group: root
    - mode: 644
{%- if grains['os_family'] == 'RedHat' %}
    - require:
{%- if is_dnf %}
       - file: mgrchannels_susemanagerplugin_dnf
       - file: mgrchannels_susemanagerplugin_conf_dnf
{%- endif %}
{%- if is_yum %}
       - file: mgrchannels_susemanagerplugin_yum
       - file: mgrchannels_susemanagerplugin_conf_yum
{%- endif %}
{%- endif %}

{%- set apt_version = salt['pkg.version']("apt") %}
{%- set apt_support_acd = grains['os_family'] == 'Debian' and apt_version and salt['pkg.version_cmp'](apt_version, "1.6.10") > 0 %}

{%- if apt_support_acd %}
aptauth_conf:
  file.managed:
    - name: "/etc/apt/auth.conf.d/susemanager.conf"
    - source:
      - salt://channels/aptauth.conf
    - template: jinja
    - user: _apt
    - group: root
    - mode: 600
{%- endif %}

{%- if grains['os_family'] == 'RedHat' %}
{%- if is_dnf %}
mgrchannels_dnf_clean_all:
  cmd.run:
    - name: /usr/bin/dnf clean all
    - runas: root
    - onchanges:
       - file: "/etc/yum.repos.d/susemanager:channels.repo"
    -  unless: "/usr/bin/dnf repolist | grep \"repolist: 0$\""
{%- endif %}
{%- if is_yum %}
mgrchannels_yum_clean_all:
  cmd.run:
    - name: /usr/bin/yum clean all
    - runas: root
    - onchanges: 
       - file: "/etc/yum.repos.d/susemanager:channels.repo"
    -  unless: "/usr/bin/yum repolist | grep \"repolist: 0$\""
{%- endif %}
{%- endif %}

{% include 'channels/gpg-keys.sls' %}
  07070100000057000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000003500000000susemanager-sls/salt/channels/yum-susemanager-plugin  07070100000058000081B400000000000000000000000160C1E96E00000011000000000000000000000000000000000000004C00000000susemanager-sls/salt/channels/yum-susemanager-plugin/susemanagerplugin.conf   [main]
enabled=1
   07070100000059000081B400000000000000000000000160C1E96E000001B2000000000000000000000000000000000000004A00000000susemanager-sls/salt/channels/yum-susemanager-plugin/susemanagerplugin.py from yum.plugins import TYPE_CORE
from yum import config

requires_api_version = '2.5'
plugin_type = TYPE_CORE


def config_hook(conduit):
    config.RepoConf.susemanager_token = config.Option()


def init_hook(conduit):
    for repo in conduit.getRepos().listEnabled():
        susemanager_token = getattr(repo, 'susemanager_token', None)
        if susemanager_token:
            repo.http_headers['X-Mgr-Auth'] = susemanager_token
  0707010000005A000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000002400000000susemanager-sls/salt/cleanup_minion   0707010000005B000081B400000000000000000000000160C1E96E0000029C000000000000000000000000000000000000002D00000000susemanager-sls/salt/cleanup_minion/init.sls  {%- if grains['os_family'] == 'RedHat' %}
mgrchannels_repo_clean_all:
  file.absent:
    - name: /etc/yum.repos.d/susemanager:channels.repo
{%- endif %}
{%- if grains['os_family'] == 'Suse' %}
mgrchannels_repo_clean_all:
  file.absent:
    - name: /etc/zypp/repos.d/susemanager:channels.repo
{%- endif %}
{%- if grains['os_family'] == 'Debian' %}
mgrchannels_repo_clean_channels:
  file.absent:
    - name: /etc/apt/sources.list.d/susemanager:channels.list
mgrchannels_repo_clean_auth:
  file.absent:
    - name: /etc/apt/auth.conf.d/susemanager.conf

mgrchannels_repo_clean_keyring:
  file.absent:
    - name: /usr/share/keyrings/mgr-archive-keyring.gpg
{%- endif %}
0707010000005C000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000002800000000susemanager-sls/salt/cleanup_ssh_minion   0707010000005D000081B400000000000000000000000160C1E96E000005D4000000000000000000000000000000000000003100000000susemanager-sls/salt/cleanup_ssh_minion/init.sls  include:
    - cleanup_minion

{% if salt['pillar.get']('contact_method') == 'ssh-push-tunnel' %}
# remove server to localhost aliasing from /etc/hosts
mgr_remove_mgr_server_localhost_alias:
  host.absent:
    - ip:
      - 127.0.0.1
    - names:
      - {{ salt['pillar.get']('mgr_server') }}
{%- endif %}

# remove server ssh authorization
mgr_remove_mgr_ssh_identity:
  ssh_auth.absent:
    - user: {{ salt['pillar.get']('mgr_sudo_user') or 'root' }}
    - source: salt://salt_ssh/mgr_ssh_id.pub

{%- if salt['pillar.get']('proxy_pub_key') and salt['pillar.get']('contact_method') == 'ssh-push-tunnel' %}
# remove proxy ssh authorization (if any)
mgr_remove_proxy_ssh_identity:
  ssh_auth.absent:
    - user: {{ salt['pillar.get']('mgr_sudo_user') or 'root' }}
    - source: salt://salt_ssh/{{ salt['pillar.get']('proxy_pub_key') }}
{%- endif %}

{%- if salt['pillar.get']('mgr_sudo_user') and salt['pillar.get']('mgr_sudo_user') != 'root' %}
{%- set home = '/home/' ~ salt['pillar.get']('mgr_sudo_user') %}
{%- else %}
{%- set home = '/root' %}
{%- endif %}

# remove own key authorization
mgr_no_own_key_authorized:
  ssh_auth.absent:
    - user: {{ salt['pillar.get']('mgr_sudo_user') or 'root' }}
    - source: {{ home }}/.ssh/mgr_own_id.pub

# remove own keys
mgr_remove_own_ssh_pub_key:
  file.absent:
    - name: {{ home }}/.ssh/mgr_own_id.pub
    - require:
      - ssh_auth: mgr_no_own_key_authorized

mgr_remove_own_ssh_key:
  file.absent:
    - name: {{ home }}/.ssh/mgr_own_id
0707010000005E000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001E00000000susemanager-sls/salt/clusters 0707010000005F000081B400000000000000000000000160C1E96E00000518000000000000000000000000000000000000002A00000000susemanager-sls/salt/clusters/addnode.sls {%- if pillar['params'].get('ssh_auth_sock', False) %}
mgr_ssh_agent_socket_clusters_addnode:
  environ.setenv:
    - name: SSH_AUTH_SOCK
    - value: {{ pillar['params'].get('ssh_auth_sock') }}
{%- endif %}

{%- set params = pillar['params'] %}
{%- for node in params.nodes %}
{%- set addparams = {'node_name': node.node_name, 'target': node.target, 'role': params.role, 'user': params.user, 'skuba_cluster_path': params.skuba_cluster_path } %}
mgr_cluster_add_node_{{ node.node_name }}:
  mgrcompat.module_run:
    - name: mgrclusters.add_node
    - provider_module: {{ pillar['cluster_type'] }}
    - params: {{ addparams }}
    - require:
   {%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_modules
   {%- else %}
      - mgrcompat: sync_modules
   {%- endif %}
   {%- if pillar['params'].get('ssh_auth_sock', False) %}
      - environ: mgr_ssh_agent_socket_clusters_addnode
   {%- endif %}
{%- for hook in pillar['state_hooks'].get('join', {}).get('before', []) %}
      - sls: {{ hook }}
{%- endfor %}
{%- endfor %}

include:
  - util.syncmodules
{%- for hook in pillar['state_hooks'].get('join', {}).get('before', []) %}
  - {{ hook }}
{%- endfor %}
{%- for hook in pillar['state_hooks'].get('join', {}).get('after', [])%}
  - {{ hook }}
{%- endfor %}
07070100000060000081B400000000000000000000000160C1E96E000002C6000000000000000000000000000000000000003000000000susemanager-sls/salt/clusters/createcluster.sls   {%- if pillar.get('ssh_auth_sock', False) %}
mgr_ssh_agent_socket_clusters_createcluster:
  environ.setenv:
    - name: SSH_AUTH_SOCK
    - value: {{ pillar['ssh_auth_sock'] }}
{%- endif %}

mgr_cluster_create_cluster:
  mgrcompat.module_run:
    - name: mgrclusters.create_cluster
    - provider_module: {{ pillar['cluster_type'] }}
    - params: {{ pillar['params'] }}
    - require:
   {%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_modules
   {%- else %}
      - mgrcompat: sync_modules
   {%- endif %}
   {%- if pillar.get('ssh_auth_sock', False) %}
      - environ: mgr_ssh_agent_socket_clusters_createcluster
   {%- endif %}

include:
  - util.syncmodules
  07070100000061000081B400000000000000000000000160C1E96E000002D8000000000000000000000000000000000000002C00000000susemanager-sls/salt/clusters/listnodes.sls   {%- if pillar['params'].get('ssh_auth_sock', False) %}
mgr_ssh_agent_socket_clusters_listnodes:
  environ.setenv:
    - name: SSH_AUTH_SOCK
    - value: {{ pillar['params'].get('ssh_auth_sock') }}
{%- endif %}

mgr_cluster_list_nodes:
  mgrcompat.module_run:
    - name: mgrclusters.list_nodes
    - provider_module: {{ pillar['cluster_type'] }}
    - params: {{ pillar['params'] }}
    - require:
   {%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_modules
   {%- else %}
      - mgrcompat: sync_modules
   {%- endif %}
   {%- if pillar['params'].get('ssh_auth_sock', False) %}
      - environ: mgr_ssh_agent_socket_clusters_listnodes
   {%- endif %}

include:
  - util.syncmodules
07070100000062000081B400000000000000000000000160C1E96E00000510000000000000000000000000000000000000002D00000000susemanager-sls/salt/clusters/removenode.sls  {%- if pillar['params'].get('ssh_auth_sock', False) %}
mgr_ssh_agent_socket_clusters_removenode:
  environ.setenv:
    - name: SSH_AUTH_SOCK
    - value: {{ pillar['params'].get('ssh_auth_sock') }}
{%- endif %}

{%- set params = pillar['params'] %}
{%- for node in params.nodes %}
{%- set removeparams = {'node_name': node.node_name, 'skuba_cluster_path': params.skuba_cluster_path, 'drain_timeout': params.drain_timeout } %}
mgr_cluster_remove_node_{{ node.node_name }}:
  mgrcompat.module_run:
    - name: mgrclusters.remove_node
    - provider_module: {{ pillar['cluster_type'] }}
    - params: {{ removeparams }}
    - require:
   {%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_modules
   {%- else %}
      - mgrcompat: sync_modules
   {%- endif %}
   {%- if pillar.get('ssh_auth_sock', False) %}
      - environ: mgr_ssh_agent_socket_clusters_removenode
   {%- endif %}
{%- for hook in pillar['state_hooks'].get('remove', {}).get('before', []) %}
      - sls: {{ hook }}
{%- endfor %}   
{%- endfor %}

include:
  - util.syncmodules
{%- for hook in pillar['state_hooks'].get('remove', {}).get('before', []) %}
  - {{ hook }}
{%- endfor %}
{%- for hook in pillar['state_hooks'].get('remove', {}).get('after', []) %}
  - {{ hook }}
{%- endfor %}
07070100000063000081B400000000000000000000000160C1E96E00000437000000000000000000000000000000000000003100000000susemanager-sls/salt/clusters/upgradecluster.sls  {%- if pillar['params'].get('ssh_auth_sock', False) %}
mgr_ssh_agent_socket_upgradecluster:
  environ.setenv:
    - name: SSH_AUTH_SOCK
    - value: {{ pillar['params'].get('ssh_auth_sock') }}
{%- endif %}

{%- set params = pillar['params'] %}
mgr_cluster_upgrade_cluster:
  mgrcompat.module_run:
    - name: mgrclusters.upgrade_cluster
    - provider_module: {{ pillar['cluster_type'] }}
    - params: {{ params }}
    - require:
   {%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_modules
   {%- else %}
      - mgrcompat: sync_modules
   {%- endif %}
   {%- if pillar.get('ssh_auth_sock', False) %}
      - environ: mgr_ssh_agent_socket_upgradecluster
   {%- endif %}
{%- for hook in pillar['state_hooks'].get('remove', {}).get('before', []) %}
      - sls: {{ hook }}
{%- endfor %}    

include:
  - util.syncmodules
{%- for hook in pillar['state_hooks'].get('upgrade', {}).get('before', []) %}
  - {{ hook }}
{%- endfor %}
{%- for hook in pillar['state_hooks'].get('upgrade', {}).get('after', []) %}
  - {{ hook }}
{%- endfor %}
 07070100000064000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000002300000000susemanager-sls/salt/configuration    07070100000065000081B400000000000000000000000160C1E96E000002D5000000000000000000000000000000000000003400000000susemanager-sls/salt/configuration/deploy_files.sls   {% if pillar.get('param_files', []) %}
{%- for file in pillar.get('param_files') %}

file_deploy_{{ loop.index }}:
{% if file.type == 'file' %}
    file.managed:
{% elif file.type == 'directory' %}
    file.directory:
{% elif file.type == 'symlink' %}
    file.symlink:
{% endif %}
    -   name: {{ file.name }}
    -   makedirs: True
{% if file.type == 'file' %}
    -   source: {{ file.source }}
    -   user: {{ file.user }}
    -   group: {{ file.group }}
    -   mode: {{ file.mode }}
{% elif file.type == 'directory' %}
    -   user: {{ file.user }}
    -   group: {{ file.group }}
    -   mode: {{ file.mode }}
{% elif file.type == 'symlink' %}
    -   target: {{ file.target }}
{% endif %}
{%- endfor %}
{% endif %}

   07070100000066000081B400000000000000000000000160C1E96E000002D5000000000000000000000000000000000000003200000000susemanager-sls/salt/configuration/diff_files.sls {% if pillar.get('param_files', []) %}
{%- for file in pillar.get('param_files') %}

file_deploy_{{ loop.index }}:
{% if file.type == 'file' %}
    file.managed:
{% elif file.type == 'directory' %}
    file.directory:
{% elif file.type == 'symlink' %}
    file.symlink:
{% endif %}
    -   name: {{ file.name }}
    -   makedirs: True
{% if file.type == 'file' %}
    -   source: {{ file.source }}
    -   user: {{ file.user }}
    -   group: {{ file.group }}
    -   mode: {{ file.mode }}
{% elif file.type == 'directory' %}
    -   user: {{ file.user }}
    -   group: {{ file.group }}
    -   mode: {{ file.mode }}
{% elif file.type == 'symlink' %}
    -   target: {{ file.target }}
{% endif %}
{%- endfor %}
{% endif %}

   07070100000067000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001C00000000susemanager-sls/salt/custom   07070100000068000081B400000000000000000000000160C1E96E00000036000000000000000000000000000000000000002500000000susemanager-sls/salt/custom/init.sls  include:
  - custom.custom_{{ grains['machine_id'] }}
  07070100000069000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000002300000000susemanager-sls/salt/custom_groups    0707010000006A000081B400000000000000000000000160C1E96E00000091000000000000000000000000000000000000002C00000000susemanager-sls/salt/custom_groups/init.sls   {% if pillar.get('group_ids', []) -%}
include:
{% for gid in pillar.get('group_ids', []) -%}
  - custom.group_{{ gid }}
{% endfor %}
{% endif %}
   0707010000006B000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000002000000000susemanager-sls/salt/custom_org   0707010000006C000081B400000000000000000000000160C1E96E00000060000000000000000000000000000000000000002900000000susemanager-sls/salt/custom_org/init.sls  {% if pillar['org_id'] is defined %}
include:
  - custom.org_{{ pillar['org_id'] }}
{% endif %}
0707010000006D000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000002100000000susemanager-sls/salt/distupgrade  0707010000006E000081B400000000000000000000000160C1E96E00000218000000000000000000000000000000000000002A00000000susemanager-sls/salt/distupgrade/init.sls {% if grains['os_family'] == 'Suse' %}
spmigration:
  mgrcompat.module_run:
    - name: pkg.upgrade
    - dist_upgrade: True
    - dryrun: {{ salt['pillar.get']('susemanager:distupgrade:dryrun', False) }}
{% if grains['osrelease_info'][0] >= 12 %}
    - novendorchange: {{ not salt['pillar.get']('susemanager:distupgrade:allowVendorChange', False) }}
{% else %}
    - fromrepo: {{ salt['pillar.get']('susemanager:distupgrade:channels', []) }}
{% endif %}
    -   require:
        - file: mgrchannels*
{% endif %}

include:
  - channels
0707010000006F000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001E00000000susemanager-sls/salt/hardware 07070100000070000081B400000000000000000000000160C1E96E00000F56000000000000000000000000000000000000003000000000susemanager-sls/salt/hardware/profileupdate.sls   {%- if grains['cpuarch'] in ['i386', 'i486', 'i586', 'i686', 'x86_64', 'aarch64'] %}
mgr_install_dmidecode:
  pkg.installed:
{%- if grains['os_family'] == 'Suse' and grains['osrelease'] in ['11.3', '11.4'] %}
    - name: pmtools
{%- else %}
    - name: dmidecode
{%- endif %}
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
{%- endif %}

grains:
  mgrcompat.module_run:
    - name: grains.items
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
cpuinfo:
  mgrcompat.module_run:
    - name: status.cpuinfo
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
udev:
  mgrcompat.module_run:
    - name: udev.exportdb
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
network-interfaces:
  mgrcompat.module_run:
    - name: network.interfaces
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
network-ips:
  mgrcompat.module_run:
    - name: sumautil.primary_ips
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
      - saltutil: sync_modules
{%- else %}
      - mgrcompat: sync_states
      - mgrcompat: sync_modules
{%- endif %}
network-modules:
  mgrcompat.module_run:
    - name: sumautil.get_net_modules
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
      - saltutil: sync_modules
{%- else %}
      - mgrcompat: sync_states
      - mgrcompat: sync_modules
{%- endif %}

{% if grains['cpuarch'] in ['i386', 'i486', 'i586', 'i686', 'x86_64'] %}
smbios-records-bios:
  mgrcompat.module_run:
    - name: smbios.records
    - rec_type: 0
    - clean: False
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
smbios-records-system:
  mgrcompat.module_run:
    - name: smbios.records
    - rec_type: 1
    - clean: False
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
smbios-records-baseboard:
  mgrcompat.module_run:
    - name: smbios.records
    - rec_type: 2
    - clean: False
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
smbios-records-chassis:
  mgrcompat.module_run:
    - name: smbios.records
    - rec_type: 3
    - clean: False
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
{% elif grains['cpuarch'] in ['s390', 's390x'] %}
mainframe-sysinfo:
  mgrcompat.module_run:
    - name: mainframesysinfo.read_values
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
{% endif %}

{%- if grains['saltversioninfo'][0] >= 2018 %}
{% if 'network.fqdns' in salt %}
fqdns:
  mgrcompat.module_run:
    - name: network.fqdns
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
{% endif%}
{%- endif%}

include:
  - util.syncstates
  - util.syncmodules
  07070100000071000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001C00000000susemanager-sls/salt/images   07070100000072000081B400000000000000000000000160C1E96E000008B7000000000000000000000000000000000000002700000000susemanager-sls/salt/images/docker.sls    {% if grains['saltversioninfo'][0] >= 2018 %}

mgr_registries_login:
  mgrcompat.module_run:
    - name: docker.login
    - registries: {{ pillar.get('docker-registries', {}).keys() | list }}

# Build the container image with docker.build.
# NOTE: pillar.get() returns None for a missing key, and `None is defined`
# is true in Jinja, so the original `is defined` checks could never take
# the else branch (and `.items()` on None would fail the render); test
# for a truthy value instead.
mgr_buildimage:
  mgrcompat.module_run:
    - name: docker.build
{%- if pillar.get('imagerepopath') %}
    - repository: "{{ pillar.get('imagerepopath') }}"
    - tag: "{{ pillar.get('imagetag', 'latest') }}"
{%- else %}
    - repository: "{{ pillar.get('imagename') }}"
    {#- fall back to 'latest' when the image name carries no explicit tag #}
    - tag: "{{ pillar.get('imagename').rsplit(':', 1)[1] if ':' in pillar.get('imagename') else 'latest' }}"
{%- endif %}
    - path: "{{ pillar.get('builddir') }}"
    - buildargs:
        repo: "{{ pillar.get('repo') }}"
        cert: "{{ pillar.get('cert') }}"
{%- if pillar.get('customvalues') %}
{%- for key, value in pillar.get('customvalues').items() %}
        {{key}}: "{{value}}"
{%- endfor %}
{%- endif %}
    - require:
      - mgrcompat: mgr_registries_login

mgr_pushimage:
  mgrcompat.module_run:
    - name: docker.push
    - image: "{{ pillar.get('imagename') }}"
    - require:
      - mgrcompat: mgr_buildimage
      - mgrcompat: mgr_registries_login

{% if 'docker.logout' in salt %}

mgr_registries_logout:
  mgrcompat.module_run:
    - name: docker.logout
    - registries: {{ pillar.get('docker-registries', {}).keys() | list }}
    - require:
      - mgrcompat: mgr_pushimage
      - mgrcompat: mgr_registries_login

{% endif %}

{% else %}

mgr_registries_login:
  mgrcompat.module_run:
    - name: dockerng.login
    - registries: {{ pillar.get('docker-registries', {}).keys() }}

# Legacy (pre-2018 Salt) image build using the dockerng module.
# NOTE: pillar.get() returns None for a missing key and `None is defined`
# is true in Jinja, so the original `is defined` check would crash the
# render with `.items()` on None when no customvalues pillar is set;
# test for a truthy value instead.
mgr_buildimage:
  mgrcompat.module_run:
    - name: dockerng.build
    - image: "{{ pillar.get('imagename') }}"
    - path: "{{ pillar.get('builddir') }}"
    - buildargs:
        repo: "{{ pillar.get('repo') }}"
        cert: "{{ pillar.get('cert') }}"
{%- if pillar.get('customvalues') %}
{%- for key, value in pillar.get('customvalues').items() %}
        {{key}}: "{{value}}"
{%- endfor %}
{%- endif %}
    - require:
      - mgrcompat: mgr_registries_login

mgr_pushimage:
  mgrcompat.module_run:
    - name: dockerng.push
    - image: "{{ pillar.get('imagename') }}"
    - require:
      - mgrcompat: mgr_buildimage
      - mgrcompat: mgr_registries_login

{% endif %}
 07070100000073000081B400000000000000000000000160C1E96E0000162F000000000000000000000000000000000000003100000000susemanager-sls/salt/images/kiwi-image-build.sls  # SUSE Manager for Retail build trigger
#

{%- set source     = pillar.get('source') %}

{%- set kiwi_dir   = '/var/lib/Kiwi/' %}
{%- set common_repo = kiwi_dir + 'repo' %}

{%- set root_dir   = kiwi_dir + pillar.get('build_id') %}
{%- set source_dir = root_dir + '/source' %}
{%- set chroot_dir = root_dir + '/chroot/' %}
{%- set dest_dir   = root_dir + '/images.build' %}
{%- set bundle_dir = root_dir + '/images/' %}
# cache dir is used only with Kiwi-ng
{%- set cache_dir  = root_dir + '/cache/' %}
{%- set bundle_id  = pillar.get('build_id') %}
{%- set activation_key = pillar.get('activation_key') %}

# on SLES11 and SLES12 use legacy Kiwi, use Kiwi NG elsewhere
{%- set use_kiwi_ng = not (salt['grains.get']('osfullname') == 'SLES' and salt['grains.get']('osmajorrelease')|int() < 15) %}

mgr_buildimage_prepare_source:
  file.directory:
    - name: {{ root_dir }}
    - clean: True
  mgrcompat.module_run:
    - name: kiwi_source.prepare_source
    - source: {{ source }}
    - root: {{ root_dir }}

mgr_buildimage_prepare_activation_key_in_source:
  file.managed:
    - name: {{ source_dir }}/root/etc/salt/minion.d/kiwi_activation_key.conf
    - makedirs: True
    - contents: |
        grains:
          susemanager:
            activation_key: {{ activation_key }}

{%- if use_kiwi_ng %}
# KIWI NG
#
{%- set kiwi = 'kiwi-ng' %}

{%- set profile_opt = '' %}
{%- if pillar.get('kiwi_profile') %}
{%-   set profile_opt = '--profile ' + pillar.get('kiwi_profile') %}
{%- endif %}

{%- macro kiwi_params() -%}
  --ignore-repos-used-for-build --add-repo file:{{ common_repo }},rpm-dir,common_repo,90,false,false --add-bootstrap-package rhn-org-trusted-ssl-cert-osimage {{ ' ' }}
{%- for repo in pillar.get('kiwi_repositories') -%}
  --add-repo {{ repo }},rpm-md,key_repo{{ loop.index }},90,false,false {{ ' ' }}
{%- endfor -%}
{%- endmacro %}

mgr_buildimage_kiwi_prepare:
  cmd.run:
    - name: "{{ kiwi }} --logfile={{ root_dir }}/prepare.log --shared-cache-dir={{ cache_dir }} {{ profile_opt }} system prepare --description {{ source_dir }} --root {{ chroot_dir }} {{ kiwi_params() }}"
    - require:
      - mgrcompat: mgr_buildimage_prepare_source
      - file: mgr_buildimage_prepare_activation_key_in_source

mgr_buildimage_kiwi_create:
  cmd.run:
    - name: "{{ kiwi }} --logfile={{ root_dir }}/create.log --shared-cache-dir={{ cache_dir }} {{ profile_opt }} system create --root {{ chroot_dir }} --target-dir  {{ dest_dir }}"
    - require:
      - cmd: mgr_buildimage_kiwi_prepare

mgr_buildimage_kiwi_bundle:
  cmd.run:
    - name: "{{ kiwi }} result bundle --target-dir {{ dest_dir }} --id {{ bundle_id }} --bundle-dir {{ bundle_dir }}"
    - require:
      - cmd: mgr_buildimage_kiwi_create


{%- else %}
# KIWI Legacy
#

{%- set kiwi_help = salt['cmd.run']('kiwi --help') %}
{%- set have_bundle_build = kiwi_help.find('--bundle-build') > 0 %}

# i586 build on x86_64 host must be called with linux32
# let's consider the build i586 if there is no x86_64 repo specified
{%- set kiwi = 'linux32 kiwi' if (pillar.get('kiwi_repositories')|join(' ')).find('x86_64') == -1 and grains.get('osarch') == 'x86_64' else 'kiwi' %}

# in SLES11 Kiwi the --add-repotype is required
{%- macro kiwi_params() -%}
  --add-repo {{ common_repo }} --add-repotype rpm-dir --add-repoalias common_repo {{ ' ' }}
{%- for repo in pillar.get('kiwi_repositories') -%}
  --add-repo {{ repo }} --add-repotype rpm-md --add-repoalias key_repo{{ loop.index }} {{ ' ' }}
{%- endfor -%}
{%- endmacro %}

# old Kiwi can't change cache location, so we have to clear cache before each build
mgr_kiwi_clear_cache:
  file.directory:
    - name: /var/cache/kiwi/
    - makedirs: True
    - clean: True

mgr_buildimage_kiwi_prepare:
  cmd.run:
    - name: "{{ kiwi }} --nocolor --force-new-root --prepare {{ source_dir }} --root {{ chroot_dir }} {{ kiwi_params() }}"
    - require:
      - mgrcompat: mgr_buildimage_prepare_source
      - file: mgr_buildimage_prepare_activation_key_in_source

mgr_buildimage_kiwi_create:
  cmd.run:
    - name: "{{ kiwi }} --nocolor --yes --create {{ chroot_dir }} --dest {{ dest_dir }} {{ kiwi_params() }}"
    - require:
      - cmd: mgr_buildimage_kiwi_prepare

{%- if have_bundle_build %}
mgr_buildimage_kiwi_bundle:
  cmd.run:
    - name: "{{ kiwi }} --nocolor --yes --bundle-build {{ dest_dir }} --bundle-id {{ bundle_id }} --destdir {{ bundle_dir }}"
    - require:
      - cmd: mgr_buildimage_kiwi_create

{%- else %}

# SLE11 Kiwi does not have --bundle-build option, we have to create the bundle tarball ourselves:

mgr_buildimage_kiwi_bundle_dir:
  file.directory:
    - name: {{ bundle_dir }}
    - require:
      - cmd: mgr_buildimage_kiwi_create

mgr_buildimage_kiwi_bundle_tarball:
  cmd.run:
    - name: "cd '{{ dest_dir }}' && tar czf '{{ bundle_dir }}'`basename *.packages .packages`-{{ bundle_id }}.tgz --no-recursion `find . -maxdepth 1 -type f`"
    - require:
      - file: mgr_buildimage_kiwi_bundle_dir

mgr_buildimage_kiwi_bundle:
  cmd.run:
    - name: "cd '{{ bundle_dir }}' && sha256sum *.tgz > `echo *.tgz`.sha256"
    - require:
      - cmd: mgr_buildimage_kiwi_bundle_tarball

{%- endif %}

{%- endif %}


{%- if pillar.get('use_salt_transport') %}
mgr_buildimage_kiwi_collect_image:
  mgrcompat.module_run:
    - name: cp.push_dir
    - path: {{ bundle_dir }}
    - require:
      - cmd: mgr_buildimage_kiwi_bundle
{%- endif %}

# Collect details about the built image and report them back to
# SUSE Manager. Runs only after the bundle was created — and, when the
# salt transport is used, after it was pushed to the master.
# Requisites carry explicit state-module prefixes for consistency with
# the rest of this file (bare IDs are more fragile across Salt versions).
mgr_buildimage_info:
  mgrcompat.module_run:
    - name: kiwi_info.image_details
    - dest: {{ dest_dir }}
    - bundle_dest: {{ bundle_dir }}
    - require:
{%- if pillar.get('use_salt_transport') %}
      - mgrcompat: mgr_buildimage_kiwi_collect_image
{%- else %}
      {#- mgr_buildimage_kiwi_bundle is a cmd.run state in every branch above #}
      - cmd: mgr_buildimage_kiwi_bundle
{%- endif %}
 07070100000074000081B400000000000000000000000160C1E96E0000025C000000000000000000000000000000000000003300000000susemanager-sls/salt/images/kiwi-image-inspect.sls    # SUSE Manager for Retail build trigger
#
{#- paths derived from the build_id pillar; must match kiwi-image-build.sls #}
{%- set root_dir   = '/var/lib/Kiwi/' + pillar.get('build_id') %}
{%- set dest_dir   = root_dir + '/images.build' %}
{%- set bundle_dir = root_dir + '/images/' %}
{#- NOTE(review): bundle_id is set but not referenced in this state file #}
{%- set bundle_id  = pillar.get('build_id') %}

# the goal is to collect all information required for
# saltboot image pillar

# Inspect the built image (custom kiwi_info execution module) in the
# build and bundle directories and return the metadata to the master.
mgr_inspect_kiwi_image:
  mgrcompat.module_run:
    - name: kiwi_info.inspect_image
    - dest: {{ dest_dir }}
    - bundle_dest: {{ bundle_dir }}

# Remove the per-build working directory once inspection succeeded.
mgr_kiwi_cleanup:
  cmd.run:
    - name: "rm -rf '{{ root_dir }}'"
    - require:
      - mgrcompat: mgr_inspect_kiwi_image
07070100000075000081B400000000000000000000000160C1E96E000009D1000000000000000000000000000000000000002E00000000susemanager-sls/salt/images/profileupdate.sls {% set container_name = salt['pillar.get']('mgr_container_name', 'mgr_container_' ~ range(1, 10000) | random )  %}

{% if grains['saltversioninfo'][0] >= 2018 %}

mgr_registries_login_inspect:
  mgrcompat.module_run:
    - name: docker.login
    - registries: {{ pillar.get('docker-registries', {}).keys() | list }}

mgr_image_profileupdate:
  mgrcompat.module_run:
    - name: docker.sls_build
    - repository: "{{ container_name }}"
    - base: "{{ pillar.get('imagename') }}"
    - mods: packages.profileupdate
    - dryrun: True
    - kwargs:
        entrypoint: ""
    - require:
      - mgrcompat: mgr_registries_login_inspect

mgr_image_inspect:
  mgrcompat.module_run:
    - name: docker.inspect_image
    - m_name: "{{ pillar.get('imagename') }}"
    - require:
      - mgrcompat: mgr_registries_login_inspect

mgr_container_remove:
  mgrcompat.module_run:
    - name: docker.rm
    - args: [ "{{ container_name }}" ]
    - force: False
    - onlyif:
      - docker ps -a | grep "{{ container_name }}" >/dev/null

mgr_image_remove:
  mgrcompat.module_run:
    - name: docker.rmi
    - m_names:
      - "{{ pillar.get('imagename') }}"
    - force: False

{% if 'docker.logout' in salt %}

mgr_registries_logout:
  mgrcompat.module_run:
    - name: docker.logout
    - registries: {{ pillar.get('docker-registries', {}).keys() | list }}
    - require:
      - mgrcompat: mgr_registries_login_inspect
      - mgrcompat: mgr_image_profileupdate

{% endif %}

{% else %}

mgr_registries_login_inspect:
  mgrcompat.module_run:
    - name: dockerng.login
    - registries: {{ pillar.get('docker-registries', {}).keys() }}

mgr_image_profileupdate:
  mgrcompat.module_run:
    - name: dockerng.sls_build
    - m_name: "{{ container_name }}"
    - base: "{{ pillar.get('imagename') }}"
    - mods: packages.profileupdate
    - dryrun: True
    - kwargs:
        entrypoint: ""
    - require:
      - mgrcompat: mgr_registries_login_inspect

mgr_image_inspect:
  mgrcompat.module_run:
    - name: dockerng.inspect
    - m_name: "{{ pillar.get('imagename') }}"
    - require:
      - mgrcompat: mgr_registries_login_inspect

mgr_container_remove:
  mgrcompat.module_run:
    - name: dockerng.rm
    - args: [ "{{ container_name }}" ]
    - force: False
    - onlyif:
      - docker ps -a | grep "{{ container_name }}" >/dev/null

mgr_image_remove:
  mgrcompat.module_run:
    - name: dockerng.rmi
    - m_names:
      - "{{ pillar.get('imagename') }}"
    - force: False

{% endif %}

include:
  - util.syncstates
   07070100000076000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001E00000000susemanager-sls/salt/packages 07070100000077000081B400000000000000000000000160C1E96E000001E1000000000000000000000000000000000000002700000000susemanager-sls/salt/packages/init.sls    {%- if grains['os_family'] == 'Suse' and grains['osmajorrelease']|int > 11 and not grains['oscodename'] == 'openSUSE Leap 15.3'%}
mgr_install_products:
  product.all_installed:
    - refresh: True
    - require:
      - file: mgrchannels_*
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - mgrcompat: sync_states
{%- endif %}
{%- endif %}

include:
  - util.syncstates
  - .packages_{{ grains['machine_id'] }}
   07070100000078000081B400000000000000000000000160C1E96E00000158000000000000000000000000000000000000003000000000susemanager-sls/salt/packages/patchdownload.sls   {% if pillar.get('param_patches', []) %}
pkg_downloaded-patches:
  pkg.patch_downloaded:
    - advisory_ids:
{%- for patch in pillar.get('param_patches', []) %}
      - {{ patch }}
{%- endfor %}
    - require:
      - mgrcompat: applychannels
{% endif %}

# Apply the channels state so repositories are configured before the
# patch download (referenced by the `mgrcompat: applychannels` require
# above). Indentation normalized to the 2-space / single-space-after-dash
# convention used everywhere else in this state tree.
applychannels:
  mgrcompat.module_run:
    - name: state.apply
    - mods: channels
07070100000079000081B400000000000000000000000160C1E96E00000489000000000000000000000000000000000000002F00000000susemanager-sls/salt/packages/patchinstall.sls    {% if grains.get('saltversioninfo', []) < [2015, 8, 12] %}
{{ salt.test.exception("You are running an old version of salt-minion that does not support patching. Please update salt-minion and try again.") }}
{% endif %}

{% if pillar.get('param_update_stack_patches', []) %}
mgr_update_stack_patches:
  pkg.patch_installed:
    - refresh: true
    - advisory_ids:
{%- for patch in pillar.get('param_update_stack_patches', []) %}
      - {{ patch }}
{%- endfor %}
    - diff_attr: ['epoch', 'version', 'release', 'arch', 'install_date_time_t']
    - require:
        - file: mgrchannels*
{% endif %}

{% if pillar.get('param_regular_patches', []) %}
mgr_regular_patches:
  pkg.patch_installed:
{% if not pillar.get('param_update_stack_patches', []) %}
    - refresh: true
{% endif %}
    - advisory_ids:
{%- for patch in pillar.get('param_regular_patches', []) %}
      - {{ patch }}
{%- endfor %}
    - diff_attr: ['epoch', 'version', 'release', 'arch', 'install_date_time_t']
    - require:
        - file: mgrchannels*
{% if pillar.get('param_update_stack_patches', []) %}
        - pkg: mgr_update_stack_patches
{% endif %}
{% endif %}

include:
  - channels
   0707010000007A000081B400000000000000000000000160C1E96E00000207000000000000000000000000000000000000002E00000000susemanager-sls/salt/packages/pkgdownload.sls {% if pillar.get('param_pkgs') %}
pkg_downloaded:
  pkg.downloaded:
    - pkgs:
{%- for pkg, arch, version in pillar.get('param_pkgs', []) %}
    {%- if grains['os_family'] == 'Debian' %}
        - {{ pkg }}:{{ arch }}: {{ version }}
    {%- elif grains.get('__suse_reserved_pkg_all_versions_support', False) %}
        - {{ pkg }}.{{ arch }}: {{ version }}
    {%- else %}
        - {{ pkg }}: {{ version }}
    {%- endif %}
{%- endfor %}
    - require:
        - file: mgrchannels*
{% endif %}

include:
  - channels
 0707010000007B000081B400000000000000000000000160C1E96E000002F6000000000000000000000000000000000000002D00000000susemanager-sls/salt/packages/pkginstall.sls  {% if pillar.get('param_pkgs') %}
pkg_installed:
  pkg.installed:
    -   refresh: true
{%- if grains['os_family'] == 'Debian' %}
    - skip_verify: {{ not pillar.get('mgr_metadata_signing_enabled', false) }}
{%- endif %}
    -   pkgs:
{%- for pkg, arch, version in pillar.get('param_pkgs', []) %}
    {%- if grains['os_family'] == 'Debian' %}
        - {{ pkg }}:{{ arch }}: {{ version }}
    {%- elif grains.get('__suse_reserved_pkg_all_versions_support', False) %}
        - {{ pkg }}.{{ arch }}: {{ version }}
    {%- else %}
        - {{ pkg }}: {{ version }}
    {%- endif %}

{%- endfor %}
    - diff_attr: ['epoch', 'version', 'release', 'arch', 'install_date_time_t']
    -   require:
        - file: mgrchannels*
{% endif %}

include:
  - channels
  0707010000007C000081B400000000000000000000000160C1E96E00000205000000000000000000000000000000000000002C00000000susemanager-sls/salt/packages/pkgremove.sls   {% if pillar.get('param_pkgs') %}
pkg_removed:
  pkg.removed:
    -   pkgs:
{%- for pkg, arch, version in pillar.get('param_pkgs', []) %}
    {%- if grains['os_family'] == 'Debian' %}
        - {{ pkg }}:{{ arch }}: {{ version }}
    {%- elif grains.get('__suse_reserved_pkg_all_versions_support', False) %}
        - {{ pkg }}.{{ arch }}: {{ version }}
    {%- else %}
        - {{ pkg }}: {{ version }}
    {%- endif %}
{%- endfor %}
    -   require:
        - file: mgrchannels*
{% endif %}

include:
  - channels
   0707010000007D000081B400000000000000000000000160C1E96E00000519000000000000000000000000000000000000003000000000susemanager-sls/salt/packages/profileupdate.sls   packages:
  mgrcompat.module_run:
    - name: pkg.info_installed
    - kwargs: {
          attr: 'status,arch,epoch,version,release,install_date_time_t',
{%- if grains.get('__suse_reserved_pkg_all_versions_support', False) %}
          errors: report,
          all_versions: true
{%- else %}
          errors: report
{%- endif %}
      }
{% if grains['os_family'] == 'Suse' %}
products:
  mgrcompat.module_run:
    - name: pkg.list_products
{% elif grains['os_family'] == 'RedHat' %}
{% include 'packages/redhatproductinfo.sls' %}
{% elif grains['os_family'] == 'Debian' %}
debianrelease:
  cmd.run:
    - name: cat /etc/os-release
    - onlyif: test -f /etc/os-release
{% endif %}

include:
  - util.syncgrains
  - util.syncstates
  - util.syncmodules

grains_update:
  mgrcompat.module_run:
    - name: grains.items
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_grains
{%- else %}
      - mgrcompat: sync_grains
{%- endif %}

{% if not pillar.get('imagename') %}
kernel_live_version:
  mgrcompat.module_run:
    - name: sumautil.get_kernel_live_version
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_modules
{%- else %}
      - mgrcompat: sync_modules
{%- endif %}
{% endif %}
   0707010000007E000081B400000000000000000000000160C1E96E00000217000000000000000000000000000000000000003400000000susemanager-sls/salt/packages/redhatproductinfo.sls   {% if grains['os_family'] == 'RedHat' %}
rhelrelease:
  cmd.run:
    - name: cat /etc/redhat-release
    - onlyif: test -f /etc/redhat-release -a ! -L /etc/redhat-release
centosrelease:
  cmd.run:
    - name: cat /etc/centos-release
    - onlyif: test -f /etc/centos-release
oraclerelease:
  cmd.run:
    - name: cat /etc/oracle-release
    - onlyif: test -f /etc/oracle-release
respkgquery:
  cmd.run:
    - name: rpm -q --whatprovides 'sles_es-release-server'
    - onlyif: rpm -q --whatprovides 'sles_es-release-server'
{% endif %}
 0707010000007F000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000002400000000susemanager-sls/salt/remotecommands   07070100000080000081B400000000000000000000000160C1E96E000000DE000000000000000000000000000000000000002D00000000susemanager-sls/salt/remotecommands/init.sls  remote_command:
  cmd.script:
    - source: {{ pillar.get('mgr_remote_cmd_script') }}
    - runas: {{ pillar.get('mgr_remote_cmd_runas', 'root') }}
    - timeout: {{ pillar.get('mgr_remote_cmd_timeout') }}
    # TODO GID

  07070100000081000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001A00000000susemanager-sls/salt/scap 07070100000082000081B400000000000000000000000160C1E96E0000006E000000000000000000000000000000000000002300000000susemanager-sls/salt/scap/init.sls    mgr_scap:
  mgrcompat.module_run:
    - name: openscap.xccdf
    - params: {{ pillar.get('mgr_scap_params') }}  07070100000083000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001E00000000susemanager-sls/salt/services 07070100000084000081B400000000000000000000000160C1E96E00000416000000000000000000000000000000000000002900000000susemanager-sls/salt/services/docker.sls  {% if pillar['addon_group_types'] is defined and 'container_build_host' in pillar['addon_group_types'] %}
mgr_install_docker:
  pkg.installed:
    - pkgs:
      - git-core
      - docker: '>=1.9.0'
{%- if grains['pythonversion'][0] == 3 %}
    {%- if grains['osmajorrelease'] == 12 %}
      - python3-docker-py: '>=1.6.0'
    {%- else %}
      - python3-docker: '>=1.6.0'
    {%- endif %}
{%- else %}
      - python-docker-py: '>=1.6.0'
{%- endif %}
{%- if grains['saltversioninfo'][0] >= 2018 %}
      - python3-salt
    {%- if grains['saltversioninfo'][0] < 3002 and salt['pkg.info_available']('python-Jinja2', 'python2-Jinja2') and salt['pkg.info_available']('python', 'python2') and salt['pkg.info_available']('python2-salt') %}
      - python2-salt
    {%- endif %}
{%- endif %}

mgr_docker_service:
  service.running:
    - name: docker
    - enable: True
    - require:
      - pkg: mgr_install_docker

mgr_min_salt:
  pkg.installed:
    - pkgs:
      - salt: '>=2016.11.1'
      - salt-minion: '>=2016.11.1'
    - order: last
{% endif %}
  07070100000085000081B400000000000000000000000160C1E96E00000A79000000000000000000000000000000000000003400000000susemanager-sls/salt/services/kiwi-image-server.sls   # Image Server installation state - part of SUSE Manager for Retail
#
# Copyright (c) 2017 - 2021 SUSE LLC

{% if pillar['addon_group_types'] is defined and 'osimage_build_host' in pillar['addon_group_types'] %}
{% set kiwi_dir = '/var/lib/Kiwi' %}

# on SLES11 and SLES12 use legacy Kiwi, use Kiwi NG elsewhere
{%- set use_kiwi_ng = not (salt['grains.get']('osfullname') == 'SLES' and salt['grains.get']('osmajorrelease')|int() < 15) %}
{%- set available_packages = salt['pkg.search']('kiwi').keys() %}

{%- if use_kiwi_ng %}
mgr_install_kiwi:
  pkg.installed:
    - pkgs:
      - python3-kiwi
{%- if 'kiwi-systemdeps-disk-images' in available_packages %}
      - kiwi-systemdeps-disk-images
      - kiwi-systemdeps-image-validation
      - kiwi-systemdeps-iso-media
{%- endif %}
{%- if 'kiwi-systemdeps-containers' in available_packages %}
      - kiwi-systemdeps-containers
{%- endif %}
      - kiwi-boot-descriptions
{%- else %}
{% set kiwi_boot_modules = ['kiwi-desc-netboot', 'kiwi-desc-saltboot', 'kiwi-desc-vmxboot', 'kiwi-desc-oemboot', 'kiwi-desc-isoboot'] %}

mgr_install_kiwi:
  pkg.installed:
    - pkgs:
      - kiwi
{% for km in kiwi_boot_modules %}
    {% if km in available_packages %}
      - {{ km }}
    {% endif %}
{% endfor %}
{% endif %}

mgr_kiwi_build_tools:
  pkg.installed:
    - pkgs:
      - git-core

mgr_kiwi_dir_created:
  file.directory:
    - name: {{ kiwi_dir }}
    - user: root
    - group: root
    - dir_mode: 755

# repo for common kiwi build needs - mainly RPM with SUSE Manager certificate
mgr_kiwi_dir_repo_created:
  file.directory:
    - name: {{ kiwi_dir }}/repo
    - user: root
    - group: root
    - dir_mode: 755

mgr_osimage_cert_deployed:
  file.managed:
{%- if grains.get('osfullname') == 'SLES' and grains.get('osmajorrelease') == '11' %}
    - name: {{ kiwi_dir }}/repo/rhn-org-trusted-ssl-cert-osimage-sle11-1.0-1.noarch.rpm
    - source: salt://images/rhn-org-trusted-ssl-cert-osimage-sle11-1.0-1.noarch.rpm
{%- else %}
    - name: {{ kiwi_dir }}/repo/rhn-org-trusted-ssl-cert-osimage-1.0-1.noarch.rpm
    - source: salt://images/rhn-org-trusted-ssl-cert-osimage-1.0-1.noarch.rpm
{%- endif %}

mgr_sshd_installed_enabled:
  pkg.installed:
    - name: openssh
  service.running:
    - name: sshd
    - enable: True

mgr_sshd_public_key_copied:
  file.append:
    - name: /root/.ssh/authorized_keys
    - source: salt://salt_ssh/mgr_ssh_id.pub
    - makedirs: True
    - require:
      - pkg: mgr_sshd_installed_enabled

mgr_saltutil_synced:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
  saltutil.sync_all
{%- else %}
  mgrcompat.module_run:
    - name: saltutil.sync_all
{%- endif %}

{% endif %}
   07070100000086000081B400000000000000000000000160C1E96E00000579000000000000000000000000000000000000002E00000000susemanager-sls/salt/services/salt-minion.sls {% include 'bootstrap/remove_traditional_stack.sls' %}

{%- if salt['pillar.get']('contact_method') not in ['ssh-push', 'ssh-push-tunnel'] %}

{# management keys should be used only once #}
{# removed to prevent trouble on the next regular minion restart #}
mgr_remove_management_key_grains:
  file.replace:
    - name: /etc/salt/minion.d/susemanager.conf
    - pattern: '^\s*management_key:.*$'
    - repl: ''
    - onlyif: grep 'management_key:' /etc/salt/minion.d/susemanager.conf

{# activation keys are only useful on the first registration #}
{# removed to prevent trouble on the next regular minion restart #}
mgr_remove_activation_key_grains:
  file.replace:
    - name: /etc/salt/minion.d/susemanager.conf
    - pattern: '^\s*activation_key:.*$'
    - repl: ''
    - onlyif: grep 'activation_key:' /etc/salt/minion.d/susemanager.conf

{# add SALT_RUNNING env variable in case it's not present on the configuration #}
mgr_append_salt_running_env_configuration:
  file.append:
    - name: /etc/salt/minion.d/susemanager.conf
    - text: |
        system-environment:
          modules:
            pkg:
              _:
                SALT_RUNNING: 1
    - unless: grep 'system-environment' /etc/salt/minion.d/susemanager.conf

mgr_salt_minion:
  pkg.installed:
    - name: salt-minion
    - order: last
  service.running:
    - name: salt-minion
    - enable: True
    - order: last
{% endif %}
   07070100000087000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000002300000000susemanager-sls/salt/srvmonitoring    07070100000088000081B400000000000000000000000160C1E96E000008B2000000000000000000000000000000000000002F00000000susemanager-sls/salt/srvmonitoring/disable.sls    node_exporter_service:
  service.dead:
    - name: prometheus-node_exporter
    - enable: False

postgres_exporter_service:
  service.dead:
    - name: prometheus-postgres_exporter
    - enable: False

{% set remove_jmx_props = {'service': 'tomcat', 'file': '/etc/sysconfig/tomcat'} %}
{%- include 'srvmonitoring/removejmxprops.sls' %}

# Verify that no JMX remote-management option remains in the tomcat
# sysconfig. `grep -q -v PATTERN FILE` succeeds whenever ANY line does
# not match, so the original check was effectively a no-op; negate plain
# greps instead ("pattern must not appear at all"). Quoted because a
# leading `!` would otherwise be parsed as a YAML tag.
jmx_tomcat_config:
  cmd.run:
    - name: "! grep -q -- '-Dcom.sun.management.jmxremote.host=' /etc/sysconfig/tomcat && ! grep -q -- '-Dcom.sun.management.jmxremote.port=3333' /etc/sysconfig/tomcat && ! grep -q -- '-Dcom.sun.management.jmxremote.ssl=false' /etc/sysconfig/tomcat && ! grep -q -- '-Dcom.sun.management.jmxremote.authenticate=false' /etc/sysconfig/tomcat && ! grep -q -- '-Djava.rmi.server.hostname=' /etc/sysconfig/tomcat"
    - require:
      - cmd: remove_tomcat_jmx_*

jmx_exporter_tomcat_service:
  service.dead:
    - name: prometheus-jmx_exporter@tomcat
    - enable: False
    - require:
      - cmd: jmx_tomcat_config

{% set remove_jmx_props = {'service': 'taskomatic', 'file': '/etc/rhn/taskomatic.conf'} %}
{%- include 'srvmonitoring/removejmxprops.sls' %}

# Verify that no JMX remote-management option remains in the taskomatic
# config. `grep -q -v PATTERN FILE` succeeds whenever ANY line does not
# match, so the original check was effectively a no-op; negate plain
# greps instead ("pattern must not appear at all"). Quoted because a
# leading `!` would otherwise be parsed as a YAML tag.
jmx_taskomatic_config:
  cmd.run:
    - name: "! grep -q -- '-Dcom.sun.management.jmxremote.host=' /etc/rhn/taskomatic.conf && ! grep -q -- '-Dcom.sun.management.jmxremote.port=3334' /etc/rhn/taskomatic.conf && ! grep -q -- '-Dcom.sun.management.jmxremote.ssl=false' /etc/rhn/taskomatic.conf && ! grep -q -- '-Dcom.sun.management.jmxremote.authenticate=false' /etc/rhn/taskomatic.conf && ! grep -q -- '-Djava.rmi.server.hostname=' /etc/rhn/taskomatic.conf"
    - require:
      - cmd: remove_taskomatic_jmx_*

jmx_exporter_taskomatic_service:
  service.dead:
    - name: prometheus-jmx_exporter@taskomatic
    - enable: False
    - require:
      - cmd: jmx_taskomatic_config

# NOTE(review): despite the "enable" in its ID, this state DISABLES
# Prometheus self-monitoring by forcing prometheus_monitoring_enabled = 0
# in rhn.conf (this file is the monitoring *disable* state tree). The ID
# is kept unchanged because state IDs are part of the reported results.
# The command updates an existing setting in place, or appends it.
mgr_enable_prometheus_self_monitoring:
  cmd.run:
    - name: grep -q '^prometheus_monitoring_enabled.*=.*' /etc/rhn/rhn.conf && sed -i 's/^prometheus_monitoring_enabled.*/prometheus_monitoring_enabled = 0/' /etc/rhn/rhn.conf || echo 'prometheus_monitoring_enabled = 0' >> /etc/rhn/rhn.conf

# Sanity check: fail the state run if the flag did not end up disabled.
mgr_is_prometheus_self_monitoring_disabled:
  cmd.run:
    - name: grep -qF 'prometheus_monitoring_enabled = 0' /etc/rhn/rhn.conf
  07070100000089000081B400000000000000000000000160C1E96E00000F61000000000000000000000000000000000000002E00000000susemanager-sls/salt/srvmonitoring/enable.sls node_exporter:
  cmd.run:
    - name: /usr/bin/rpm --query --info golang-github-prometheus-node_exporter

# Run the node exporter; the preceding rpm query state guarantees the
# golang-github-prometheus-node_exporter package is installed.
node_exporter_service:
  service.running:
    - name: prometheus-node_exporter
    - enable: True
    - require:
      - cmd: node_exporter

# Fails (rpm exits non-zero) when the postgres exporter package is missing.
postgres_exporter:
  cmd.run:
    - name: /usr/bin/rpm --query --info golang-github-wrouesnel-postgres_exporter

# Deploy the custom SQL queries the exporter exposes as extra metrics.
postgres_exporter_configuration:
  file.managed:
    - name: /etc/postgres_exporter/postgres_exporter_queries.yaml
    - makedirs: True
    - source:
      - salt://srvmonitoring/postgres_exporter_queries.yaml
    - user: root
    - group: root
    - mode: 644

# Render the exporter's sysconfig (DB connection URL from pillar) and run the
# service; restart it whenever the queries file above changes.
postgres_exporter_service:
  file.managed:
    - name: /etc/sysconfig/prometheus-postgres_exporter
    - source: salt://srvmonitoring/prometheus-postgres_exporter
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - require:
      - cmd: postgres_exporter
      - file: postgres_exporter_configuration
  service.running:
    - name: prometheus-postgres_exporter
    - enable: True
    - require:
      - file: postgres_exporter_service
    - watch:
      - file: postgres_exporter_configuration

# Fails when the JMX exporter packages are not installed.
jmx_exporter:
  cmd.run:
    - name: /usr/bin/rpm --query --info prometheus-jmx_exporter prometheus-jmx_exporter-tomcat

{% set remove_jmx_props = {'service': 'tomcat', 'file': '/etc/sysconfig/tomcat'} %}
{%- include 'srvmonitoring/removejmxprops.sls' %}

# Append the JMX remote-management properties (port 3333) to Tomcat's
# JAVA_OPTS. The included removejmxprops states strip any previous values
# first, so this sed does not duplicate them.
jmx_tomcat_config:
  cmd.run:
    - name: sed -i 's/JAVA_OPTS="\(.*\)"/JAVA_OPTS="\1 -Dcom.sun.management.jmxremote.host=localhost -Dcom.sun.management.jmxremote.port=3333 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Djava.rmi.server.hostname=localhost"/' /etc/sysconfig/tomcat
    - require:
      - cmd: remove_tomcat_jmx_*

# Start the Tomcat JMX exporter once the packages are present and JAVA_OPTS set.
jmx_exporter_tomcat_service:
  service.running:
    - name: prometheus-jmx_exporter@tomcat
    - enable: True
    - require:
      - cmd: jmx_exporter
      - cmd: jmx_tomcat_config

# Environment for the prometheus-jmx_exporter@taskomatic systemd instance:
# the exporter listens on port 5557.
jmx_exporter_taskomatic_systemd_config:
  file.managed:
    - name: /etc/prometheus-jmx_exporter/taskomatic/environment
    - makedirs: True
    - user: root
    - group: root
    - mode: 644
    - contents: |
        PORT="5557"
        EXP_PARAMS=""

{% set remove_jmx_props = {'service': 'taskomatic', 'file': '/etc/rhn/taskomatic.conf'} %}
{%- include 'srvmonitoring/removejmxprops.sls' %}

# Append the JMX remote-management properties (port 3334) to Taskomatic's
# JAVA_OPTS; the included removejmxprops states strip previous values first.
jmx_taskomatic_config:
  cmd.run:
    - name: sed -i 's/JAVA_OPTS="\(.*\)"/JAVA_OPTS="\1 -Dcom.sun.management.jmxremote.host=localhost -Dcom.sun.management.jmxremote.port=3334 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Djava.rmi.server.hostname=localhost"/' /etc/rhn/taskomatic.conf
    - require:
      - cmd: remove_taskomatic_jmx_*

# Exporter scrape configuration: connect to Taskomatic's JMX port and export
# the whitelisted MBeans.
# NOTE(review): the Catalina:type=ThreadPool entry looks copied from the
# Tomcat configuration — Taskomatic is unlikely to expose Catalina MBeans;
# confirm whether it can be dropped.
jmx_exporter_taskomatic_yaml_config:
  file.managed:
    - name: /etc/prometheus-jmx_exporter/taskomatic/prometheus-jmx_exporter.yml
    - makedirs: True
    - user: root
    - group: root
    - mode: 644
    - contents: |
        hostPort: localhost:3334
        username:
        password:
        whitelistObjectNames:
          - java.lang:type=Threading,*
          - java.lang:type=Memory,*
          - Catalina:type=ThreadPool,name=*
        rules:
        - pattern: ".*"

# Run the Taskomatic JMX exporter once its config files exist and JAVA_OPTS set.
jmx_exporter_taskomatic_service:
  service.running:
    - name: prometheus-jmx_exporter@taskomatic
    - enable: True
    - require:
      - cmd: jmx_exporter
      - cmd: jmx_taskomatic_config
      - file: jmx_exporter_taskomatic_systemd_config
      - file: jmx_exporter_taskomatic_yaml_config

# Turn the self-monitoring flag on in rhn.conf: rewrite an existing
# prometheus_monitoring_enabled line in place, or append one if missing.
mgr_enable_prometheus_self_monitoring:
  cmd.run:
    - name: grep -q '^prometheus_monitoring_enabled.*=.*' /etc/rhn/rhn.conf && sed -i 's/^prometheus_monitoring_enabled.*/prometheus_monitoring_enabled = 1/' /etc/rhn/rhn.conf || echo 'prometheus_monitoring_enabled = 1' >> /etc/rhn/rhn.conf

# Fail unless the flag now reads exactly "prometheus_monitoring_enabled = 1".
mgr_is_prometheus_self_monitoring_enabled:
  cmd.run:
    - name: grep -qF 'prometheus_monitoring_enabled = 1' /etc/rhn/rhn.conf
   0707010000008A000081B400000000000000000000000160C1E96E0000044E000000000000000000000000000000000000004200000000susemanager-sls/salt/srvmonitoring/postgres_exporter_queries.yaml mgr_serveractions:
  query: |
    SELECT (
      SELECT COUNT(*)
        FROM rhnServerAction
        WHERE status = (
          SELECT id FROM rhnActionStatus WHERE name = 'Queued'
       )
    ) AS queued,
    (
      SELECT COUNT(*)
        FROM rhnServerAction
        WHERE status = (
          SELECT id FROM rhnActionStatus WHERE name = 'Picked Up'
       )
    ) AS picked_up,
    (
      SELECT COUNT(*)
        FROM rhnServerAction
        WHERE status = (
          SELECT id FROM rhnActionStatus WHERE name IN ('Completed')
       )
    ) AS completed,
    (
      SELECT COUNT(*)
        FROM rhnServerAction
        WHERE status = (
          SELECT id FROM rhnActionStatus WHERE name IN ('Failed')
       )
    ) AS failed;
  metrics:
    - queued:
        usage: "GAUGE"
        description: "Count of queued Actions"
    - picked_up:
        usage: "GAUGE"
        description: "Count of picked up Actions"
    - completed:
        usage: "COUNTER"
        description: "Count of completed Actions"
    - failed:
        usage: "COUNTER"
        description: "Count of failed Actions"
  0707010000008B000081B400000000000000000000000160C1E96E00000324000000000000000000000000000000000000004000000000susemanager-sls/salt/srvmonitoring/prometheus-postgres_exporter   ## Path:           Applications/PostgreSQLExporter
## Description:    Prometheus exporter for PostgreSQL
## Type:           string()
## Default:        "postgresql://user:passwd@localhost:5432/database?sslmode=disable"
## ServiceRestart: postgres-exporter
#
# Connection URL to postgresql instance
#
DATA_SOURCE_NAME="postgresql://{{ pillar['db_user'] }}:{{ pillar['db_pass'] }}@{{ pillar['db_host'] }}:{{ pillar['db_port'] }}/{{ pillar['db_name'] }}?sslmode=disable"

## Path:           Applications/PostgreSQLExporter
## Description:    Prometheus exporter for PostgreSQL
## Type:           string()
## Default:        ""
## ServiceRestart: postgres-exporter
#
# Extra options for postgres-exporter
#
POSTGRES_EXPORTER_PARAMS="--extend.query-path /etc/postgres_exporter/postgres_exporter_queries.yaml"
0707010000008C000081B400000000000000000000000160C1E96E0000059B000000000000000000000000000000000000003600000000susemanager-sls/salt/srvmonitoring/removejmxprops.sls remove_{{remove_jmx_props.service}}_jmx_host:
  cmd.run:
    - name: sed -ri 's/JAVA_OPTS="(.*)-Dcom\.sun\.management\.jmxremote\.host=\S*(.*)"/JAVA_OPTS="\1 \2"/' {{remove_jmx_props.file}}
    - onlyif: grep -F -- '-Dcom.sun.management.jmxremote.host=' {{remove_jmx_props.file}}

# Strip the jmxremote.port property from JAVA_OPTS, if present.
remove_{{remove_jmx_props.service}}_jmx_port:
  cmd.run:
    - name: sed -ri 's/JAVA_OPTS="(.*)-Dcom\.sun\.management\.jmxremote\.port=[0-9]*(.*)"/JAVA_OPTS="\1 \2"/' {{remove_jmx_props.file}}
    - onlyif: grep -E -- '-Dcom\.sun\.management\.jmxremote\.port=[0-9]+' {{remove_jmx_props.file}}

# Strip -Dcom.sun.management.jmxremote.ssl=false from JAVA_OPTS, if present.
remove_{{remove_jmx_props.service}}_jmx_ssl:
  cmd.run:
    - name: sed -i 's/JAVA_OPTS="\(.*\)-Dcom\.sun\.management\.jmxremote\.ssl=false\(.*\)"/JAVA_OPTS="\1 \2"/' {{remove_jmx_props.file}}
    - onlyif: grep -F -- '-Dcom.sun.management.jmxremote.ssl=false' {{remove_jmx_props.file}}

# Strip -Dcom.sun.management.jmxremote.authenticate=false, if present.
remove_{{remove_jmx_props.service}}_jmx_auth:
  cmd.run:
    - name: sed -i 's/JAVA_OPTS="\(.*\)-Dcom\.sun\.management\.jmxremote\.authenticate=false\(.*\)"/JAVA_OPTS="\1 \2"/' {{remove_jmx_props.file}}
    - onlyif: grep -F -- '-Dcom.sun.management.jmxremote.authenticate=false' {{remove_jmx_props.file}}

# Strip the java.rmi.server.hostname property from JAVA_OPTS, if present.
remove_{{remove_jmx_props.service}}_jmx_hostname:
  cmd.run:
    - name: sed -ri 's/JAVA_OPTS="(.*)-Djava\.rmi\.server\.hostname=\S*(.*)"/JAVA_OPTS="\1 \2"/' {{remove_jmx_props.file}}
    - onlyif: grep -F -- '-Djava.rmi.server.hostname=' {{remove_jmx_props.file}}
 0707010000008D000081B400000000000000000000000160C1E96E0000059B000000000000000000000000000000000000002E00000000susemanager-sls/salt/srvmonitoring/status.sls jmx_taskomatic_exporter_service:
  mgrcompat.module_run:
    - name: service.status
    - m_name: "prometheus-jmx_exporter@taskomatic.service"

# Report whether the Tomcat JMX exporter unit is running.
jmx_tomcat_exporter_service:
  mgrcompat.module_run:
    - name: service.status
    - m_name: "prometheus-jmx_exporter@tomcat.service"

# Report whether the node exporter unit is running.
node_exporter_service:
  mgrcompat.module_run:
    - name: service.status
    - m_name: "prometheus-node_exporter.service"

# Report whether the postgres exporter unit is running.
postgres_exporter_service:
  mgrcompat.module_run:
    - name: service.status
    - m_name: "prometheus-postgres_exporter.service"

# Check that Tomcat's JAVA_OPTS carries the full JMX property set (port 3333).
jmx_tomcat_java_config:
  mgrcompat.module_run:
    - name: file.search
    - path: /etc/sysconfig/tomcat
    - pattern: "-Dcom\\.sun\\.management\\.jmxremote\\.host=\\S* -Dcom\\.sun\\.management\\.jmxremote\\.port=3333 -Dcom\\.sun\\.management\\.jmxremote\\.ssl=false -Dcom\\.sun\\.management\\.jmxremote\\.authenticate=false -Djava\\.rmi\\.server\\.hostname="

# Check that Taskomatic's JAVA_OPTS carries the full JMX property set (port 3334).
jmx_taskomatic_java_config:
  mgrcompat.module_run:
    - name: file.search
    - path: /etc/rhn/taskomatic.conf
    - pattern: "-Dcom\\.sun\\.management\\.jmxremote\\.host=\\S* -Dcom\\.sun\\.management\\.jmxremote\\.port=3334 -Dcom\\.sun\\.management\\.jmxremote\\.ssl=false -Dcom\\.sun\\.management\\.jmxremote\\.authenticate=false -Djava\\.rmi\\.server\\.hostname="

# Check the self-monitoring flag is enabled in rhn.conf.
mgr_is_prometheus_self_monitoring_enabled:
  cmd.run:
    - name: grep -q 'prometheus_monitoring_enabled\s*=\s*1\s*$' /etc/rhn/rhn.conf

include:
  - util.syncstates
 0707010000008E000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000002300000000susemanager-sls/salt/ssh_bootstrap    0707010000008F000081B400000000000000000000000160C1E96E000007F9000000000000000000000000000000000000002C00000000susemanager-sls/salt/ssh_bootstrap/init.sls   mgr_ssh_identity:
  ssh_auth.present:
    - user: {{ salt['pillar.get']('mgr_sudo_user') or 'root' }}
    - source: salt://salt_ssh/mgr_ssh_id.pub
{% if salt['pillar.get']('contact_method') == 'ssh-push-tunnel' %}
mgr_server_localhost_alias_present:
  host.present:
{% else %}
mgr_server_localhost_alias_absent:
  host.absent:
{% endif %}
    - ip:
      - 127.0.0.1
    - names:
      - {{ salt['pillar.get']('mgr_server') }}

{%- if salt['pillar.get']('proxy_pub_key') and salt['pillar.get']('contact_method') == 'ssh-push-tunnel' %}
no_push_key_authorized:
  ssh_auth.absent:
    - user: {{ salt['pillar.get']('mgr_sudo_user') or 'root' }}
    - comment: susemanager-ssh-push

proxy_ssh_identity:
  ssh_auth.present:
    - user: {{ salt['pillar.get']('mgr_sudo_user') or 'root' }}
    - source: salt://salt_ssh/{{ salt['pillar.get']('proxy_pub_key') }}
    - require:
      - ssh_auth: no_push_key_authorized
{%- endif %}

{%- if salt['pillar.get']('mgr_sudo_user') and salt['pillar.get']('mgr_sudo_user') != 'root' %}
{%- set home = '/home/' ~ salt['pillar.get']('mgr_sudo_user') %}
{%- else %}
{%- set home = '/root' %}
{%- endif %}

generate_own_ssh_key:
  cmd.run:
    - name: ssh-keygen -N '' -C 'susemanager-own-ssh-push' -f {{ home }}/.ssh/mgr_own_id -t rsa -q
    - creates: {{ home }}/.ssh/mgr_own_id.pub

ownership_own_ssh_key:
  file.managed:
    - name: {{ home }}/.ssh/mgr_own_id
    - user: {{ salt['pillar.get']('mgr_sudo_user') or 'root' }}
    - require:
      - cmd: generate_own_ssh_key

no_own_key_authorized:
  ssh_auth.absent:
    - user: {{ salt['pillar.get']('mgr_sudo_user') or 'root' }}
    - comment: susemanager-own-ssh-push
    - require:
      - file: ownership_own_ssh_key

authorize_own_key:
  ssh_auth.present:
    - user: {{ salt['pillar.get']('mgr_sudo_user') or 'root' }}
    - source: {{ home }}/.ssh/mgr_own_id.pub
    - require:
      - file: ownership_own_ssh_key
      - ssh_auth: no_own_key_authorized

{% include 'channels/gpg-keys.sls' %}
{% include 'bootstrap/remove_traditional_stack.sls' %}
   07070100000090000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001A00000000susemanager-sls/salt/util 07070100000091000081B400000000000000000000000160C1E96E00000113000000000000000000000000000000000000003600000000susemanager-sls/salt/util/mgr_disable_fqdns_grain.sls mgr_disable_fqdns_grains:
  file.append:
    - name: /etc/salt/minion.d/susemanager.conf
    - text: "enable_fqdns_grains: False"

mgr_salt_minion:
  service.running:
   - name: salt-minion
   - enable: True
   - order: last
   - watch:
     - file: mgr_disable_fqdns_grains
 07070100000092000081B400000000000000000000000160C1E96E00000168000000000000000000000000000000000000003700000000susemanager-sls/salt/util/mgr_mine_config_clean_up.sls    {%- if salt['pillar.get']('contact_method') not in ['ssh-push', 'ssh-push-tunnel'] %}
mgr_disable_mine:
  file.managed:
    - name: /etc/salt/minion.d/susemanager-mine.conf
    - contents: "mine_enabled: False"

mgr_salt_minion:
  service.running:
   - name: salt-minion
   - enable: True
   - order: last
   - watch:
     - file: mgr_disable_mine
{% endif %}
07070100000093000081B400000000000000000000000160C1E96E000000CC000000000000000000000000000000000000003500000000susemanager-sls/salt/util/mgr_start_event_grains.sls  mgr_start_event_grains:
  file.append:
    - name: /etc/salt/minion.d/susemanager.conf
    - text: |
        start_event_grains:
          - machine_id
          - saltboot_initrd
          - susemanager
07070100000094000081B400000000000000000000000160C1E96E0000001B000000000000000000000000000000000000002300000000susemanager-sls/salt/util/noop.sls    mgr_do_nothing:
  test.nop
 07070100000095000081B400000000000000000000000160C1E96E000000C0000000000000000000000000000000000000002A00000000susemanager-sls/salt/util/syncbeacons.sls sync_beacons:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
  saltutil.sync_beacons
{%- else %}
  mgrcompat.module_run:
    - name: saltutil.sync_beacons
{%- endif %}
07070100000096000081B400000000000000000000000160C1E96E0000005B000000000000000000000000000000000000002C00000000susemanager-sls/salt/util/synccustomall.sls   include:
  - util.syncmodules
  - util.syncstates
  - util.syncgrains
  - util.syncbeacons
 07070100000097000081B400000000000000000000000160C1E96E000000D8000000000000000000000000000000000000002900000000susemanager-sls/salt/util/syncgrains.sls  sync_grains:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
  saltutil.sync_grains:
{%- else %}
  mgrcompat.module_run:
    - name: saltutil.sync_grains
{%- endif %}
    - reload_grains: true
07070100000098000081B400000000000000000000000160C1E96E000000C0000000000000000000000000000000000000002A00000000susemanager-sls/salt/util/syncmodules.sls sync_modules:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
  saltutil.sync_modules
{%- else %}
  mgrcompat.module_run:
    - name: saltutil.sync_modules
{%- endif %}
07070100000099000081B400000000000000000000000160C1E96E000000E5000000000000000000000000000000000000002900000000susemanager-sls/salt/util/syncstates.sls  sync_states:
{#- Run saltutil.sync_states either via the reserved saltutil state (newer
    Salt) or via mgrcompat.module_run as a fallback — same structure as
    util.syncmodules and util.syncbeacons. The previous fallback branch,
    `elif salt['saltutil.sync_states']() or True`, executed sync_states as a
    side effect at template-render time and was always truthy; a plain `else`
    is what is intended. #}
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
  saltutil.sync_states
{%- else %}
  mgrcompat.module_run:
    - name: saltutil.sync_states
{%- endif %}

   0707010000009A000081B400000000000000000000000160C1E96E000000FD000000000000000000000000000000000000002900000000susemanager-sls/salt/util/systeminfo.sls  include:
  - util.syncmodules
  - util.syncstates
  - util.syncgrains
  - util.syncbeacons
status_uptime:
  mgrcompat.module_run:
    - name: status.uptime
grains_update:
  mgrcompat.module_run:
    - name: grains.item
    - args:
      - kernelrelease
   0707010000009B000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001A00000000susemanager-sls/salt/virt 0707010000009C000081B400000000000000000000000160C1E96E000005A6000000000000000000000000000000000000002800000000susemanager-sls/salt/virt/create-vm.sls   domain_define:
    virt.running:
        - name: {{ pillar['name'] }}
        - cpu: {{ pillar['vcpus'] }}
        - mem: {{ pillar['mem'] // 1024 }}
        - os_type: {{ pillar['os_type'] }}
        - arch: {{ pillar['arch'] }}
        - vm_type: {{ pillar['vm_type'] }}
        - disks:
{% for disk in pillar['disks'] %}
            - name: {{ disk['name'] }}
              model: {{ disk['model'] }}
    {% if 'device' in disk %}
              device: {{ disk['device'] }}
    {% endif %}
    {% if 'format' in disk %}
              format: {{ disk['format'] }}
    {% endif %}
    {% if 'source_file' in disk %}
              source_file: {{ disk['source_file'] if disk['source_file'] != '' else 'null' }}
    {% endif %}
    {% if 'pool' in disk %}
              pool: {{ disk['pool'] }}
    {% endif %}
    {% if 'size' in disk %}
              size: {{ disk['size'] }}
    {% endif %}
    {% if 'image' in disk %}
              image: {{ disk['image'] }}
    {% endif %}
{% endfor %}
{% if 'interfaces' in pillar %}
        - interfaces:
    {% for nic in pillar['interfaces'] %}
            - name: {{ nic['name'] }}
              type: {{ nic['type'] }}
              source: {{ nic['source'] }}
        {% if 'mac' in nic %}
              mac: {{ nic['mac'] if nic['mac'] != '' else 'null' }}
        {% endif %}
    {% endfor %}
{% endif %}
        - graphics:
            type: {{ pillar['graphics']['type'] }}
        - seed: False
  0707010000009D000081B400000000000000000000000160C1E96E000000D9000000000000000000000000000000000000002600000000susemanager-sls/salt/virt/deleted.sls vm_stopped:
  virt.powered_off:
    - name: {{ pillar['domain_name'] }}

mgr_virt_destroy:
  mgrcompat.module_run:
    - name: virt.purge
    - vm_: {{ pillar['domain_name'] }}
    - require:
      - virt: vm_stopped
   0707010000009E000081B400000000000000000000000160C1E96E00000113000000000000000000000000000000000000002C00000000susemanager-sls/salt/virt/engine-events.sls   {% if pillar['virt_entitled'] %}
/etc/salt/minion.d/libvirt-events.conf:
  file.managed:
    - contents: |
        engines:
          - libvirt_events

/var/cache/virt_state.cache:
  file.absent

{% else %}

/etc/salt/minion.d/libvirt-events.conf:
  file.absent

{% endif %}
 0707010000009F000081B400000000000000000000000160C1E96E0000020C000000000000000000000000000000000000003200000000susemanager-sls/salt/virt/network-statechange.sls {% if pillar['network_state'] != 'delete' %}
mgr_network_{{ pillar['network_state'] }}:
  mgrcompat.module_run:
    - name: virt.network_{{ pillar['network_state'] }}
    - m_name: {{ pillar['network_name'] }}

{% else %}
mgr_network_stop:
  mgrcompat.module_run:
    - name: virt.network_stop
    - m_name: {{ pillar['network_name'] }}

mgr_network_delete:
  mgrcompat.module_run:
    - name: virt.network_undefine
    - m_name: {{ pillar['network_name'] }}
    - require:
        - mgrcompat: mgr_network_stop
{% endif %}
070701000000A0000081B400000000000000000000000160C1E96E000015E4000000000000000000000000000000000000002A00000000susemanager-sls/salt/virt/pool-create.sls {% set pool_state = salt.virt.pool_info(pillar['pool_name']).get(pillar['pool_name'], {}).get('state') %}
{% set state = 'running' if pool_state == 'running' else pillar['action_type'] %}

pool_{{ state }}:
  virt.pool_{{ state }}:
    - name: {{ pillar['pool_name'] }}
    - ptype: {{ pillar['pool_type'] }}
    {% if pillar['target']|default(none) %}
    - target: {{ pillar['target'] }}
    {% endif %}
    - autostart: {{ pillar['autostart'] }}
    {% if pillar['permissions']|default(none) %}
    - permissions:
      {% if pillar['permissions']['mode']|default(none) %}
        mode: {{ pillar['permissions']['mode'] }}
      {% endif %}
      {% if pillar['permissions']['owner']|default(none) %}
        owner: {{ pillar['permissions']['owner'] }}
      {% endif %}
      {% if pillar['permissions']['group']|default(none) %}
        group: {{ pillar['permissions']['group'] }}
      {% endif %}
      {% if pillar['permissions']['label']|default(none) %}
        label: {{ pillar['permissions']['label'] }}
      {% endif %}
    {% endif %}  {# pillar['permissions']['mode']|default(none) #}
    {% if pillar['source']|default(none) %}
    - source:
      {% if pillar['source']['dir']|default(none) %}
        dir: {{ pillar['source']['dir'] }}
      {% endif %}
      {% if pillar['source']['name']|default(none) %}
        name: {{ pillar['source']['name'] }}
      {% endif %}
      {% if pillar['source']['format']|default(none) %}
        format: {{ pillar['source']['format'] }}
      {% endif %}
      {% if pillar['source']['initiator']|default(none) %}
        initiator: {{ pillar['source']['initiator'] }}
      {% endif %}
      {% if pillar['source']['hosts']|default(none) %}
        hosts:
        {% for host in pillar['source']['hosts'] %}
          - {{ host }}
        {% endfor %}
      {% endif %}  {# pillar['source']['hosts']|default(none) #}
      {% if pillar['source']['auth']|default(none) %}
        auth:
          username: {{ pillar['source']['auth']['username'] }}
          password: {{ pillar['source']['auth']['password'] }}
      {% endif %}  {# pillar['source']['auth']|default(none) #}
      {% if pillar['source']['devices']|default(none) %}
        devices:
        {% for device in pillar['source']['devices'] %}
          - path: {{ device['path'] }}
          {% if device['part_separator']|default(none) %}
            part_separator: {{ device['part_separator'] }}
          {% endif %}
        {% endfor %}
      {% endif %}  {# pillar['source']['devices']|default(none) #}
      {% if pillar['source']['adapter']|default(none) %}
        adapter:
        {% if pillar['source']['adapter']['type']|default(none) %}
          type: {{ pillar['source']['adapter']['type'] }}
        {% endif %}
        {% if pillar['source']['adapter']['name']|default(none) %}
          name: {{ pillar['source']['adapter']['name'] }}
        {% endif %}
        {% if pillar['source']['adapter']['parent']|default(none) %}
          parent: {{ pillar['source']['adapter']['parent'] }}
        {% endif %}
        {% if pillar['source']['adapter']['managed']|default(none) %}
          managed: {{ pillar['source']['adapter']['managed'] }}
        {% endif %}
        {% if pillar['source']['adapter']['parent_wwnn']|default(none) %}
          parent_wwnn: {{ pillar['source']['adapter']['parent_wwnn'] }}
        {% endif %}
        {% if pillar['source']['adapter']['parent_wwpn']|default(none) %}
          parent_wwpn: {{ pillar['source']['adapter']['parent_wwpn'] }}
        {% endif %}
        {% if pillar['source']['adapter']['parent_fabric_wwn']|default(none) %}
          parent_fabric_wwn: {{ pillar['source']['adapter']['parent_fabric_wwn'] }}
        {% endif %}
        {% if pillar['source']['adapter']['wwnn']|default(none) %}
          wwnn: {{ pillar['source']['adapter']['wwnn'] }}
        {% endif %}
        {% if pillar['source']['adapter']['wwpn']|default(none) %}
          wwpn: {{ pillar['source']['adapter']['wwpn'] }}
        {% endif %}
        {% if pillar['source']['adapter']['parent_address']|default(none) %}
          parent_address:
          {% if pillar['source']['adapter']['parent_address']['unique_id']|default(none) %}
            unique_id: {{ pillar['source']['adapter']['parent_address']['unique_id'] }}
          {% endif %}
          {% if pillar['source']['adapter']['parent_address']['address']|default(none) %}
            address:
            {% if pillar['source']['adapter']['parent_address']['address']['domain']|default(none) %}
              domain: {{ pillar['source']['adapter']['parent_address']['address']['domain'] }}
            {% endif %}
            {% if pillar['source']['adapter']['parent_address']['address']['bus']|default(none) %}
              bus: {{ pillar['source']['adapter']['parent_address']['address']['bus'] }}
            {% endif %}
            {% if pillar['source']['adapter']['parent_address']['address']['slot']|default(none) %}
              slot: {{ pillar['source']['adapter']['parent_address']['address']['slot'] }}
            {% endif %}
            {% if pillar['source']['adapter']['parent_address']['address']['function']|default(none) %}
              function: {{ pillar['source']['adapter']['parent_address']['address']['function'] }}
            {% endif %}
          {% endif %}  {# pillar['source']['adapter']['parent_address']['address']|default(none) #}
        {% endif %}  {# pillar['source']['adapter']['parent_address']|default(none) #}
      {% endif %}  {# pillar['source']['adapter']|default(none) #}
    {% endif %}  {# pillar['source']|default(none) #}
070701000000A1000081B400000000000000000000000160C1E96E00000075000000000000000000000000000000000000002B00000000susemanager-sls/salt/virt/pool-deleted.sls    mgr_pool_deleted:
  virt.pool_deleted:
    - name: {{ pillar['pool_name'] }}
    - purge: {{ pillar['pool_purge'] }}
   070701000000A2000081B400000000000000000000000160C1E96E00000072000000000000000000000000000000000000002D00000000susemanager-sls/salt/virt/pool-refreshed.sls  mgr_pool_refreshed:
  mgrcompat.module_run:
    - name: virt.pool_refresh
    - m_name: {{ pillar['pool_name'] }}
  070701000000A3000081B400000000000000000000000160C1E96E00000096000000000000000000000000000000000000002F00000000susemanager-sls/salt/virt/pool-statechange.sls    mgr_pool_{{ pillar['pool_state'] }}:
  mgrcompat.module_run:
    - name: virt.pool_{{ pillar['pool_state'] }}
    - m_name: {{ pillar['pool_name'] }}
  070701000000A4000081B400000000000000000000000160C1E96E000000B6000000000000000000000000000000000000002400000000susemanager-sls/salt/virt/reset.sls   powered_off:
  virt.powered_off:
    - name: {{ pillar['domain_name'] }}

restarted:
  virt.running:
    - name: {{ pillar['domain_name'] }}
    - require:
      - virt: powered_off
  070701000000A5000081B400000000000000000000000160C1E96E00000068000000000000000000000000000000000000002600000000susemanager-sls/salt/virt/resumed.sls mgr_virt_resume:
  mgrcompat.module_run:
    - name: virt.resume
    - vm_: {{ pillar['domain_name'] }}
070701000000A6000081B400000000000000000000000160C1E96E000000A9000000000000000000000000000000000000002500000000susemanager-sls/salt/virt/setmem.sls  mgr_virt_mem:
  mgrcompat.module_run:
    - name: virt.setmem
    - vm_: {{ pillar['domain_name'] }}
    - memory: {{ pillar['domain_mem'] // 1024 }}
    - config: True
   070701000000A7000081B400000000000000000000000160C1E96E000000A6000000000000000000000000000000000000002700000000susemanager-sls/salt/virt/setvcpus.sls    mgr_virt_vcpus:
  mgrcompat.module_run:
    - name: virt.setvcpus
    - vm_: {{ pillar['domain_name'] }}
    - vcpus: {{ pillar['domain_vcpus'] }}
    - config: True
  070701000000A8000081B400000000000000000000000160C1E96E00000043000000000000000000000000000000000000002A00000000susemanager-sls/salt/virt/statechange.sls {{ pillar['domain_name'] }}:
    virt.{{ pillar['domain_state'] }}
 070701000000A9000081B400000000000000000000000160C1E96E00000068000000000000000000000000000000000000002800000000susemanager-sls/salt/virt/suspended.sls   mgr_virt_suspend:
  mgrcompat.module_run:
    - name: virt.pause
    - vm_: {{ pillar['domain_name'] }}
070701000000AA000081B400000000000000000000000160C1E96E000005F5000000000000000000000000000000000000002800000000susemanager-sls/salt/virt/update-vm.sls   domain_update:
    mgrcompat.module_run:
        - name: virt.update
        - m_name: {{ pillar['name'] }}
        - cpu: {{ pillar['vcpus'] }}
        - mem: {{ pillar['mem'] // 1024 }}
{% if 'disks' in pillar %}
        - disks:
    {% for disk in pillar['disks'] %}
            - name: {{ disk['name'] }}
              model: {{ disk['model'] }}
        {% if 'device' in disk %}
              device: {{ disk['device'] }}
        {% endif %}
        {% if 'type' in disk %}
              type: {{ disk['type'] }}
        {% endif %}
        {% if 'format' in disk %}
              format: {{ disk['format'] }}
        {% endif %}
        {% if 'source_file' in disk %}
              source_file: {{ disk['source_file'] if disk['source_file'] != '' else 'null' }}
        {% endif %}
        {% if 'pool' in disk %}
              pool: {{ disk['pool'] }}
        {% endif %}
        {% if 'size' in disk %}
              size: {{ disk['size'] }}
        {% endif %}
        {% if 'image' in disk %}
              image: {{ disk['image'] }}
        {% endif %}
    {% endfor %}
{% endif %}
{% if 'interfaces' in pillar %}
        - interfaces:
    {% for nic in pillar['interfaces'] %}
            - name: {{ nic['name'] }}
              type: {{ nic['type'] }}
              source: {{ nic['source'] }}
        {% if 'mac' in nic %}
              mac: {{ nic['mac'] if nic['mac'] != '' else 'null' }}
        {% endif %}
    {% endfor %}
{% endif %}
        - graphics:
            type: {{ pillar['graphics']['type'] }}
   070701000000AB000081B400000000000000000000000160C1E96E000000F2000000000000000000000000000000000000002D00000000susemanager-sls/salt/virt/volume-deleted.sls  include:
  - virt.pool-refreshed

mgr_volume_deleted:
  mgrcompat.module_run:
    - name: virt.volume_delete
    - pool: {{ pillar['pool_name'] }}
    - volume: {{ pillar['volume_name'] }}
    - require_in:
        - sls: virt.pool-refreshed
  070701000000AC000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001500000000susemanager-sls/scap  070701000000AD000081B400000000000000000000000160C1E96E00001532000000000000000000000000000000000000002A00000000susemanager-sls/scap/xccdf-resume.xslt.in <?xml version="1.0" encoding="UTF-8"?>
<!-- Copyright 2012 Red Hat Inc., Durham, North Carolina. All Rights Reserved.

This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the Free
Software Foundation; either version 2.1 of the License.

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for more
details.

You should have received a copy of the GNU Lesser General Public License along
with this library; if not, write to the Free Software Foundation, Inc., 59
Temple Place, Suite 330, Boston, MA  02111-1307 USA

Authors:
     Simon Lukasik <slukasik@redhat.com>
-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0"
    xmlns:cdf1="http://checklists.nist.gov/xccdf/1.1"
    xmlns:cdf2="http://checklists.nist.gov/xccdf/1.2">
    <xsl:output method="xml" encoding="UTF-8"/>

    <xsl:template match="/">
        <benchmark-resume>
            <xsl:apply-templates select="*[local-name()='Benchmark']"/>
        </benchmark-resume>
    </xsl:template>

    <xsl:template match="cdf1:Benchmark | cdf2:Benchmark">
        <xsl:copy-of select="@id"/>
        <xsl:attribute name="version">
            <xsl:value-of select="normalize-space(cdf1:version/text()|cdf2:version/text())"/>
        </xsl:attribute>

        <xsl:variable name="profileId" select="cdf1:TestResult[1]/cdf1:profile/@idref | cdf2:TestResult[1]/cdf2:profile/@idref"/>
        <xsl:choose>
            <xsl:when test="not($profileId)"/> <!-- Do not send profile element when scanning with 'default' profile. -->
            <xsl:when test="cdf1:Profile[@id = $profileId] | cdf2:Profile[@id = $profileId]">
                <xsl:apply-templates select="cdf1:Profile[@id = $profileId] | cdf2:Profile[@id = $profileId]"/>
            </xsl:when>
            <xsl:otherwise>
                <profile title="Tailored profile">
                    <xsl:attribute name="id">
                         <xsl:value-of select="$profileId"/>
                    </xsl:attribute>
                </profile>
            </xsl:otherwise>
        </xsl:choose>
        <xsl:apply-templates select="cdf1:TestResult[1] | cdf2:TestResult[1]"/>
    </xsl:template>

    <xsl:template match="cdf1:Profile | cdf2:Profile">
        <profile>
            <xsl:attribute name="title">
                <xsl:value-of select="normalize-space(cdf1:title/text() | cdf2:title/text())"/>
            </xsl:attribute>
            <xsl:copy-of select="@id"/>
            <xsl:attribute name="description">
                <xsl:value-of select="normalize-space(cdf1:description[@xml:lang='en-US']/text() | cdf2:description[@xml:lang='en-US']/text())"/>
            </xsl:attribute>
        </profile>
    </xsl:template>

    <xsl:template match="cdf1:TestResult | cdf2:TestResult">
        <TestResult>
            <xsl:copy-of select="@id"/>
            <xsl:copy-of select="@start-time"/>
            <xsl:copy-of select="@end-time"/>
            <pass>
                    <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'pass'] | cdf2:rule-result[cdf2:result = 'pass']"/>
            </pass>
            <fail>
                    <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'fail'] | cdf2:rule-result[cdf2:result = 'fail']"/>
            </fail>
            <error>
                    <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'error'] | cdf2:rule-result[cdf2:result = 'error']"/>
            </error>
            <unknown>
                    <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'unknown'] | cdf2:rule-result[cdf2:result = 'unknown']"/>
            </unknown>
            <notapplicable>
                    <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'notapplicable'] | cdf2:rule-result[cdf2:result = 'notapplicable']"/>
            </notapplicable>
            <notchecked>
                    <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'notchecked'] | cdf2:rule-result[cdf2:result = 'notchecked']"/>
            </notchecked>
            <notselected>
                    <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'notselected'] | cdf2:rule-result[cdf2:result = 'notselected']"/>
            </notselected>
            <informational>
                   <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'informational'] | cdf2:rule-result[cdf2:result = 'informational']"/>
            </informational>
            <fixed>
                    <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'fixed'] | cdf2:rule-result[cdf2:result = 'fixed']"/>
            </fixed>
        </TestResult>
    </xsl:template>

    <xsl:template match="cdf1:rule-result | cdf2:rule-result">
        <rr>
            <xsl:attribute name="id">
                <xsl:value-of select="normalize-space(@idref)"/>
            </xsl:attribute>
            <xsl:apply-templates select="cdf1:ident | cdf2:ident"/>
        </rr>
    </xsl:template>

    <xsl:template match="cdf1:ident | cdf2:ident">
        <ident>
            <xsl:copy-of select="@system"/>
            <xsl:value-of select="normalize-space(text())"/>
        </ident>
    </xsl:template>
</xsl:stylesheet>
  070701000000AE000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001400000000susemanager-sls/src   070701000000AF000081B400000000000000000000000160C1E96E0000021B000000000000000000000000000000000000001E00000000susemanager-sls/src/README.md ## Python Code Maintenance

Tests are written with PyTest. This way:

1. Create your "test_foo.py" file.

2. Import with double-dot your package,
   so it will be included in the sys path, e.g.:

   from ..beacons import pkgset

3. Create a test function "def test_my_foo(..."

4. Rock-n-roll by simply calling "py.test".


Don't mind `.cache` and `__pycache__` directories,
they are ignored in an explicit `.gitignore`.

Have fun. :)

## Run Unit tests 

Use the following command to run the unit tests:
`make -f Makefile.python docker_pytest`
 070701000000B0000081B400000000000000000000000160C1E96E00000000000000000000000000000000000000000000002000000000susemanager-sls/src/__init__.py   070701000000B1000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001C00000000susemanager-sls/src/beacons   070701000000B2000081B400000000000000000000000160C1E96E00000000000000000000000000000000000000000000002800000000susemanager-sls/src/beacons/__init__.py   070701000000B3000081B400000000000000000000000160C1E96E000006C4000000000000000000000000000000000000002600000000susemanager-sls/src/beacons/pkgset.py # -*- coding: utf-8 -*-
'''
Watch libzypp/RPM database via cookies and fire
an event to the SUSE Manager if that has been changed.

Author: Bo Maryniuk <bo@suse.de>
'''

from __future__ import absolute_import
import os
import logging
log = logging.getLogger(__name__)


__virtualname__ = 'pkgset'  # name under which Salt loads this beacon


def __virtual__():
    '''
    Only load this beacon when one of the libzypp/yum notification plugins
    is installed on the minion.  Returns the virtual module name on success,
    False otherwise.
    '''
    plugin_paths = (
        "/usr/lib/zypp/plugins/commit/susemanager",   # Remove this once 2015.8.7 not in use
        "/usr/lib/zypp/plugins/commit/zyppnotify",
        "/usr/share/yum-plugins/susemanagerplugin.py",  # Remove this once 2015.8.7 not in use
        "/usr/share/yum-plugins/yumnotify.py",
    )
    if any(os.path.exists(path) for path in plugin_paths):
        return __virtualname__
    return False


def validate(config):
    '''
    Validate the beacon configuration. A "cookie" file path is mandatory.

    :param config: beacon configuration mapping.
    :return: (True, message) when a cookie path is set, (False, message) otherwise.
    '''
    cookie_path = config.get('cookie')
    if cookie_path:
        return True, 'Configuration validated'
    return False, 'Cookie path has not been set.'


def beacon(config):
    '''
    Watch the cookie file written by libzypp's plugin. If its content
    changes between polls, fire an event to the Master.

    Example Config

    .. code-block:: yaml

        beacons:
          pkgset:
            cookie: /path/to/cookie/file
            interval: 5

    :param config: beacon configuration with the 'cookie' file path.
    :return: a list with a single {'tag': 'changed'} event, or an empty list.
    '''
    events = []
    cookie_path = config.get('cookie', '')
    if not os.path.exists(cookie_path):
        return events

    with open(config.get('cookie')) as handle:
        current = handle.read().strip()
        # First poll: seed the cached value so no event fires yet.
        if __virtualname__ not in __context__:
            __context__[__virtualname__] = current
        if __context__[__virtualname__] != current:
            events.append({
                'tag': 'changed'
            })
            __context__[__virtualname__] = current

    return events
070701000000B4000081B400000000000000000000000160C1E96E000033A6000000000000000000000000000000000000002A00000000susemanager-sls/src/beacons/virtpoller.py # -*- coding: utf-8 -*-
#
# Copyright (c) 2008--2014 Red Hat, Inc.
# Copyright (c) 2016 SUSE LLC
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
'''
Watch libvirt and fire events with changes to virtual machines

Author: Michael Calmer <mc@suse.com>
'''

from __future__ import absolute_import
import sys
import os
import logging
log = logging.getLogger(__name__)

try:
    import libvirt  # pylint: disable=import-error
    from libvirt import libvirtError
    HAS_LIBVIRT = True
except ImportError:
    HAS_LIBVIRT = False
    libvirt = None


try:
    import cPickle as pickle
except ImportError:
    import pickle
import time
import traceback
import binascii

# On-disk snapshot of the last polled domain state.
CACHE_DATA_PATH = '/var/cache/virt_state.cache'
# Lifetime of the cache; after this a full report is forced.
CACHE_EXPIRE_SECS = 60 * 60 * 6   # 6 hours, in seconds

##
# This structure maps the libvirt state enumeration (virDomainState, used
# as an index) to labels that SUSE Manager understands.
# Reasons we don't care about differences between NOSTATE, RUNNING and BLOCKED:
# 1. technically, the domain is still "running"
# 2. RHN Classic / Red Hat Satellite / Spacewalk are not able to
# display 'blocked' & 'nostate'
#    as valid states
# 3. to avoid 'Abuse of Service' messages: bugs #230106 and #546676

VIRT_STATE_NAME_MAP = ( 'running',  # VIR_DOMAIN_NOSTATE
                        'running',  # VIR_DOMAIN_RUNNING
                        'running',  # VIR_DOMAIN_BLOCKED
                        'paused',   # VIR_DOMAIN_PAUSED
                        'stopped',  # VIR_DOMAIN_SHUTDOWN
                        'stopped',  # VIR_DOMAIN_SHUTOFF
                        'crashed')  # VIR_DOMAIN_CRASHED

class EventType:
    '''Labels for the kind of virtualization event reported upstream.'''
    EXISTS = 'exists'
    REMOVED = 'removed'
    FULLREPORT = 'fullreport'

class TargetType:
    '''Labels for the entity an event refers to.'''
    SYSTEM = 'system'
    DOMAIN = 'domain'

class VirtualizationType:
    '''Labels for the virtualization flavor of a domain.'''
    PARA = 'para_virtualized'
    FULLY = 'fully_virtualized'

class PropertyType:
    '''Keys used in the per-domain properties dictionaries.'''
    NAME = 'name'
    UUID = 'uuid'
    TYPE = 'virt_type'
    MEMORY = 'memory_size'
    VCPUS = 'vcpus'
    STATE = 'state'
    IDENTITY = 'identity'
    ID = 'id'
    MESSAGE = 'message'

__virtualname__ = 'virtpoller'  # name under which Salt loads this beacon


###############################################################################
# PollerStateCache Class
###############################################################################

class PollerStateCache:
    """
    Persisted snapshot of the hypervisor domain state.

    On construction, the previous snapshot (if any) is loaded from disk and
    compared against the freshly polled ``domain_data``; the differences are
    then available via :meth:`get_added`, :meth:`get_modified` and
    :meth:`get_removed`.  Call :meth:`save` to persist the new snapshot.
    """

    ###########################################################################
    # Public Interface
    ###########################################################################

    def __init__(self, domain_data, cache_file = CACHE_DATA_PATH,
            expire_time = CACHE_EXPIRE_SECS):
        """
        This method creates a new poller state based on the provided domain
        list.  The domain_data list should be in the form returned from
        poller.poll_hypervisor.  That is,

             { uuid : { 'name'        : '...',
                        'uuid'        : '...',
                        'virt_type'   : '...',
                        'memory_size' : '...',
                        'vcpus'       : '...',
                        'state'       : '...' }, ... }

        :param domain_data: mapping of domain uuid -> properties dict.
        :param cache_file: path of the on-disk cache file.
        :param expire_time: cache lifetime; may be replaced by the expiry
                            timestamp stored in an existing cache file.
        """
        self.__expire_time = expire_time
        self.__cache_file = cache_file

        # Start by loading the old state, if necessary.  Note this may
        # overwrite __expire_time with the value stored on disk.
        self._load_state()
        self.__new_domain_data = domain_data

        # Now compare the given domain_data against the one loaded in the old
        # state.
        self._compare_domain_data()

        # Lazy ("%s", arg) style defers formatting until DEBUG is enabled.
        log.debug("Added: %s", repr(self.__added))
        log.debug("Removed: %s", repr(self.__removed))
        log.debug("Modified: %s", repr(self.__modified))

    def save(self):
        """
        Updates the cache on disk with the latest domain data.
        """
        self._save_state()

    def is_expired(self):
        """
        Returns true if this cache is expired.  A cache with no recorded
        expiry time (``None``) is never considered expired.
        """
        if self.__expire_time is None:
            return False
        else:
            return int(time.time()) >= self.__expire_time

    def is_changed(self):
        """
        Returns a truthy value if any domain was added, removed or modified
        since the last state poll.
        """
        return self.__added or self.__removed or self.__modified

    def get_added(self):
        """
        Returns a dict keyed by uuid for each domain that has been added
        since the last state poll.
        """
        return self.__added

    def get_modified(self):
        """
        Returns a dict keyed by uuid for each domain that has been modified
        since the last state poll.
        """
        return self.__modified

    def get_removed(self):
        """
        Returns a dict keyed by uuid for each domain that has been removed
        since the last state poll.
        """
        return self.__removed

    ###########################################################################
    # Helper Methods
    ###########################################################################

    def _load_state(self):
        """
        Loads the last hypervisor state from disk into
        ``__old_domain_data`` and ``__expire_time``.
        """
        state = {}
        try:
            with open(self.__cache_file, 'rb') as cache_file:
                try:
                    state = pickle.load(cache_file)
                except pickle.PickleError as pe:
                    # Strange.  Possibly, the file is corrupt.  We'll load an
                    # empty state instead.
                    log.debug("Error occurred while loading state: %s", pe)
                except EOFError:
                    log.debug("Unexpected EOF. Probably an empty file.")
        except IOError as ioe:
            # Couldn't open the cache file.  That's ok, there might not be
            # one.  We'll only complain if debugging is enabled.
            log.debug("Could not open cache file '%s': %s",
                      self.__cache_file, ioe)

        if state:
            log.debug("Loaded state: %s", repr(state))

            self.__expire_time = int(state['expire_time'])

            # If the cache is expired, set the old data to None so we force
            # a refresh, and drop the stale cache file.
            if self.is_expired():
                self.__old_domain_data = None
                os.unlink(self.__cache_file)
            else:
                self.__old_domain_data = state['domain_data']

        else:
            self.__old_domain_data = None
            self.__expire_time     = None

    def _save_state(self):
        """
        Saves the given polling state to disk, overwriting whatever is
        already there.  Write errors are intentionally allowed to propagate.
        """
        # First, ensure that the proper parent directory is created.
        # 0700: the cached state is private to this host.
        cache_dir_path = os.path.dirname(self.__cache_file)
        if not os.path.exists(cache_dir_path):
            os.makedirs(cache_dir_path, 0o700)

        state = {}
        state['domain_data'] = self.__new_domain_data
        if self.__expire_time is None or self.is_expired():
            state['expire_time'] = int(time.time()) + CACHE_EXPIRE_SECS
        else:
            state['expire_time'] = self.__expire_time

        with open(self.__cache_file, "wb") as cache_file:
            pickle.dump(state, cache_file)

    def _compare_domain_data(self):
        """
        Compares the old domain_data to the new domain_data and populates
        the ``__added``, ``__removed`` and ``__modified`` dicts (each keyed
        by uuid), relative to the new domain_data.
        """
        self.__added    = {}
        self.__removed  = {}
        self.__modified = {}

        # First, figure out the modified and added uuids.
        if self.__new_domain_data:
            for (uuid, new_properties) in list(self.__new_domain_data.items()):
                if not self.__old_domain_data or \
                    uuid not in self.__old_domain_data:

                    self.__added[uuid] = self.__new_domain_data[uuid]
                else:
                    old_properties = self.__old_domain_data[uuid]
                    if old_properties != new_properties:
                        self.__modified[uuid] = self.__new_domain_data[uuid]

        # Now, figure out the removed uuids.
        if self.__old_domain_data:
            for uuid in list(self.__old_domain_data.keys()):
                if not self.__new_domain_data or \
                    uuid not in self.__new_domain_data:

                    self.__removed[uuid] = self.__old_domain_data[uuid]


###############################################################################
### beacon                                                                  ###
###############################################################################

def __virtual__():
    '''Only load this beacon when the libvirt Python bindings are available.'''
    if HAS_LIBVIRT:
        return __virtualname__
    return False


def validate(config):
    '''
    Validate the beacon configuration: it must be a dictionary.

    :param config: beacon configuration value to check.
    :return: (True, message) for a dict, (False, message) otherwise.
    '''
    if isinstance(config, dict):
        return True, 'Configuration validated'
    return False, ('Configuration for virtpoller '
                   'beacon must be a dictionary.')


def beacon(config):
    '''
    Polls the hypervisor for information about the currently
    running set of domains and reports changes since the last poll.

    Example Config

    .. code-block:: yaml

        beacons:
          virtpoller:
            expire_time: 21600
            cache_file: '/var/cache/virt_state.cache'
            interval: 320

    :param config: beacon configuration (see example above).
    :return: a list containing a single ``{'plan': [...]}`` item when
             changes were detected, otherwise an empty list.
    '''

    ret = []

    if not libvirt:
        log.trace("no libvirt")
        return ret

    try:
        conn = libvirt.openReadOnly(None)
    except libvirt.libvirtError as lve:
        # Include the libvirt error detail; previously it was silently
        # discarded, making failures hard to diagnose.
        log.error("Warning: Could not retrieve virtualization information! "
                  "libvirtd service needs to be running. Error: %s", lve)
        conn = None

    if not conn:
        # No connection to hypervisor made
        return ret

    domains = conn.listAllDomains(0)

    state = {}
    for domain in domains:
        uuid = binascii.hexlify(domain.UUID())
        # SEE: http://libvirt.org/html/libvirt-libvirt.html#virDomainInfo
        # for more info.
        domain_info = domain.info()

        # Set the virtualization type.  We can tell if the domain is fully virt
        # by checking the domain's OSType() attribute.
        virt_type = VirtualizationType.PARA
        if domain.OSType().lower() == 'hvm':
            virt_type = VirtualizationType.FULLY

        # We need to filter out the small per-minute KB changes that occur
        # inside a VM, so round the memory down to whole megabytes while
        # keeping the value expressed in KB.
        memory = (domain_info[2] // 1024) * 1024
        properties = {
            PropertyType.NAME   : domain.name(),
            PropertyType.UUID   : uuid,
            PropertyType.TYPE   : virt_type,
            PropertyType.MEMORY : str(memory), # current memory
            PropertyType.VCPUS  : domain_info[3],
            PropertyType.STATE  : VIRT_STATE_NAME_MAP[domain_info[0]] }

        state[uuid] = properties

    poller_state = PollerStateCache(state,
                                    cache_file = config.get('cache_file', CACHE_DATA_PATH),
                                    expire_time = config.get('expire_time', CACHE_EXPIRE_SECS))

    plan = []
    if poller_state.is_changed():
        added    = poller_state.get_added()
        removed  = poller_state.get_removed()
        modified = poller_state.get_modified()

        # An expired cache means the old state was dropped: announce a full
        # report so the server can resynchronize.
        if poller_state.is_expired():
            item = {'time': int(time.time()),
                    'event_type': EventType.FULLREPORT,
                    'target_type': TargetType.DOMAIN }
            plan.append(item)

        # Added and modified domains are both reported as EXISTS events.
        for (uuid, data) in list(added.items()):
            item = {'time': int(time.time()),
                    'event_type': EventType.EXISTS,
                    'target_type': TargetType.DOMAIN,
                    'guest_properties': data}
            plan.append(item)

        for (uuid, data) in list(modified.items()):
            item = {'time': int(time.time()),
                    'event_type': EventType.EXISTS,
                    'target_type': TargetType.DOMAIN,
                    'guest_properties': data}
            plan.append(item)

        for (uuid, data) in list(removed.items()):
            item = {'time': int(time.time()),
                    'event_type': EventType.REMOVED,
                    'target_type': TargetType.DOMAIN,
                    'guest_properties': data}
            plan.append(item)

    poller_state.save()
    if len(plan) > 0:
        ret.append({'plan': plan})
    return ret
  070701000000B5000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001800000000susemanager-sls/src/doc   070701000000B6000081B400000000000000000000000160C1E96E000008C4000000000000000000000000000000000000002200000000susemanager-sls/src/doc/README.md # Uyuni configuration modules (`uyuni_config`) documentation

These execution and state modules allow to configure organizations, users, user permissions on channels and system groups on an Uyuni or SUSE Manager Server.

## General pillar data configuration

Virtually all functions in the modules leverage the XMLRPC API. It is thus necessary to provide an Uyuni/SUSE Manager administrator user name and password, with permissions on the entities to configure.

It is possible and recommended to configure those credentials in a pillar file with the following structure:
```
uyuni:
  xmlrpc:
    user: admin
    password: admin
```

## Detailed function documentation

Individual methods, parameters and return values are documented in `uyuni_config_execution_module_doc.txt` and `uyuni_config_state_module_doc.txt` in the same directory that contains this file.

## Examples

A few examples are provided:

- `examples/uyuni_config_hardcode.sls`: shows how to define an organization, a trust, a system group and a user with channel permissions. Note: all credentials are hardcoded directly in the file for simplicity's sake, but should at least be moved to pillars in a production environment
- `examples/ldap/uyuni_users_ldap.sls`: shows how to define multiple users based on data coming from an LDAP server via the LDAP pillar module. This allows to implement syncing LDAP users to Uyuni/SUSE Manager

### LDAP example specifics

Configuration notes:
- see  "General pillar data configuration" above for general credential configuration in pillars
- one more pillar needs to be defined in which organization administrator credentials are specified for each organization the state is going to create users in. An example with one organization can be found in `examples/ldap/pillar_orgs.yaml`
- in order to retrieve data from an LDAP server, the [pillar_ldap module](https://docs.saltstack.com/en/latest/ref/pillar/all/salt.pillar.pillar_ldap.html) is used, and needs its own configuration pillar. An example can be found in `examples/ldap/pillar_ldap.yaml`

In this particular example, the following LDAP fields are extracted in order to match corresponding Uyuni/SUSE Manager parameters:
- user name
- email
- first_name
- last_name
- roles
070701000000B7000081B400000000000000000000000160C1E96E00005839000000000000000000000000000000000000003E00000000susemanager-sls/src/doc/uyuni_config_execution_module_doc.txt === channel_list_manageable_channels
**(login, password)**
List all of manageable channels for the authenticated user

....
login: user login id
password: user password
....

    return: list of manageable channels for the user
    
=== channel_list_my_channels
**(login, password)**
List all of subscribed channels for the authenticated user

....
login: user login id
password: user password
....

    return: list of subscribed channels for the user

=== channel_software_is_globally_subscribable
**(channel_label, org_admin_user=None, org_admin_password=None)**
Returns whether the channel is globally subscribable on the organization

....
channel_label: label of the channel
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean which indicates if channel is globally subscribable
    
=== channel_software_is_user_manageable
**(channel_label, login, org_admin_user=None, org_admin_password=None)**

Returns whether the channel may be managed by the given user.

....
channel_label: label of the channel
login: user login id
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean which indicates if user can manage channel or not
    
=== channel_software_is_user_subscribable
**(channel_label, login, org_admin_user=None, org_admin_password=None)**

Returns whether the channel may be subscribed by the given user.

....
channel_label: label of the channel
login: user login id
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean which indicates whether the user may subscribe to the channel
    
=== channel_software_set_user_manageable
**(channel_label, login, access, org_admin_user=None, org_admin_password=None)**

Set the manageable flag for a given channel and user.
If access is set to 'true', this method will give the user manage permissions to the channel.
Otherwise, that privilege is revoked.

....
channel_label: label of the channel
login: user login id
access: True if the user should have management access to channel
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success
    
=== channel_software_set_user_subscribable
**(channel_label, login, access, org_admin_user=None, org_admin_password=None)**

Set the subscribable flag for a given channel and user.
If value is set to 'true', this method will give the user subscribe permissions to the channel.
Otherwise, that privilege is revoked.

....
channel_label: label of the channel
login: user login id
access: True if the user should have subscribe access to channel
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success
    
=== master_select_minions
**(target=None, target_type='glob')**

Return list minions from the configured Salt Master on the same host which match the expression on the defined target

....
target: target expression to filter minions
target_type: target type, one of the following: glob, grain, grain_pcre, pillar, pillar_pcre,
                pillar_exact, compound, compound_pillar_exact. Default: glob.
....
        
    return: list of minion IDs
    
=== org_create
**(name, org_admin_user, org_admin_password, first_name, last_name, email, admin_prefix='Mr.', pam=False, admin_user=None, admin_password=None)**

Create an Uyuni organization
Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

....
name: organization name
org_admin_user: organization admin user
org_admin_password: organization admin password
first_name: organization admin first name
last_name: organization admin last name
email: organization admin email
admin_prefix: organization admin prefix
pam: organization admin PAM authentication
admin_user: uyuni admin user
admin_password: uyuni admin password
....

    return: dictionary with org information
    
=== org_delete
**(name, admin_user=None, admin_password=None)**

Delete an organization
Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

....
name: organization name
admin_user: uyuni admin user
admin_password: uyuni admin password
....

    return: boolean, True indicates success
    
=== org_get_details
**(name, admin_user=None, admin_password=None)**

Get details of an organization.
Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

....
name: organization name
admin_user: uyuni admin user
admin_password: uyuni admin password
....

    return: organization details
    
=== org_list_orgs
**(admin_user=None, admin_password=None)**

List all organizations.
Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

....
admin_user: uyuni admin user
admin_password: uyuni admin password
....

    return: list of all available organizations.
    
=== org_trust_add_trust
**(org_id, org_trust_id, admin_user=None, admin_password=None)**

Add an organization to the list of trusted organizations.
Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

....
org_id: Organization id
org_trust_id: Trust organization id
admin_user: uyuni admin user
admin_password: uyuni admin password
....

    return: boolean, True indicates success
    
=== org_trust_add_trust_by_name
**(org_name, org_trust, admin_user=None, admin_password=None)**

Add an organization to the list of trusted organizations.
Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

....
org_name: organization name
org_trust: Trust organization name
admin_user: uyuni admin user
admin_password: uyuni admin password
....

    return: boolean, True indicates success
    
=== org_trust_list_orgs
**(org_admin_user=None, org_admin_password=None)**

List all organizations trusted by the authenticated user organization

....
org_admin_user: organization admin user
org_admin_password: organization admin password
....

    return: List of organization details
    
=== org_trust_list_trusts
**(org_name, admin_user=None, admin_password=None)**

List all trusts for one organization
Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

....
org_name: Name of the organization to get the trusts
admin_user: authentication user
admin_password: authentication user password
....

    return: list with all organizations and their trust status
    
=== org_trust_remove_trust
**(org_id, org_untrust_id, admin_user=None, admin_password=None)**

Remove an organization from the list of trusted organizations.
Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

....
org_id: organization id
org_untrust_id: organization id to untrust
admin_user: uyuni admin user
admin_password: uyuni admin password
....

    return: boolean, True indicates success
    
=== org_trust_remove_trust_by_name
**(org_name, org_untrust, admin_user=None, admin_password=None)**

Remove an organization from the list of trusted organizations.
Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

....
org_name: organization name
org_untrust: organization name to untrust
admin_user: uyuni admin user
admin_password: uyuni admin password
....

    return: boolean, True indicates success
    
=== org_update_name
**(org_id, name, admin_user=None, admin_password=None)**

update an Uyuni organization name
Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

....
org_id: organization internal id
name: new organization name
admin_user: uyuni admin user
admin_password: uyuni admin password
....

    return: organization details
    
=== systemgroup_add_remove_systems
**(name, add_remove, system_ids=[], org_admin_user=None, org_admin_password=None)**

Update systems on a system group.

....
name: Name of the system group.
add_remove: True to add to the group, False to remove.
system_ids: list of system ids to add/remove from group
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....

    return: boolean, True indicates success
    
=== systemgroup_create
**(name, descr, org_admin_user=None, org_admin_password=None)**

Create a system group.

....
name: Name of the system group.
descr: Description of the system group.
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....

    return: details of the system group
    
=== systemgroup_delete
**(name, org_admin_user=None, org_admin_password=None)**

Delete a system group.

....
name: Name of the system group.
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....

    return: boolean, True indicates success
    
=== systemgroup_get_details
**(name, org_admin_user=None, org_admin_password=None)**

Return system group details.

....
name: Name of the system group.
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....

    return: details of the system group
    
=== systemgroup_list_systems
**(name, minimal=True, org_admin_user=None, org_admin_password=None)**

List systems in a system group

....
name: Name of the system group.
minimal: default True. Only return minimal information about systems, use False to get more details
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....

    return: List of system information
    
=== systemgroup_update
**(name, descr, org_admin_user=None, org_admin_password=None)**

Update a system group.

....
name: Name of the system group.
descr: Description of the system group.
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....

    return: details of the system group
    
=== systems_get_minion_id_map
**(username=None, password=None, refresh=False)**

Returns a map from minion ID to Uyuni system ID for all systems a user has access to

....
username: username to authenticate
password: password for user
refresh: Get new data from server, ignoring values in local context cache
....

    return: Map between minion ID and system ID of all system accessible by authenticated user
    
=== user_add_assigned_system_groups
**(login, server_group_names, set_default=False, org_admin_user=None, org_admin_password=None)**

Add system groups to user's list of assigned system groups.

....
login: user id to look for
server_group_names: systems groups to add to list of assigned system groups
set_default: Should system groups also be added to user's list of default system groups.
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success
    
=== user_add_role
**(login, role, org_admin_user=None, org_admin_password=None)**
Adds a role to an Uyuni user.

....
login: user id to look for
role: role to be added to the user
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success
    
=== user_create
**(login, password, email, first_name, last_name, use_pam_auth=False, org_admin_user=None, org_admin_password=None)**

Create an Uyuni user.

....
login: user id to look for
password: password for the user
email: user email address
first_name: user first name
last_name: user last name
use_pam_auth: if you wish to use PAM authentication for this user
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success
    
=== user_delete
**(login, org_admin_user=None, org_admin_password=None)**

Deletes an Uyuni user

....
login: user id to look for
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success
    
=== user_get_details
**(login, password=None, org_admin_user=None, org_admin_password=None)**

Get details of an Uyuni user
If password is provided as a parameter, then it will be used to authenticate
If no user credentials are provided, organization administrator credentials will be used
If no user credentials neither organization admin credentials are provided, credentials from pillar will be used

....
login: user id to look for
password: password for the user
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: The user information
    
=== user_list_assigned_system_groups
**(login, org_admin_user=None, org_admin_password=None)**

Returns the system groups that a user can administer.

....
login: user id to look for
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: List of system groups that a user can administer
    
=== user_list_roles
**(login, password=None, org_admin_user=None, org_admin_password=None)**

Returns an Uyuni user roles.
If password is provided as a parameter, then it will be used to authenticate
If no user credentials are provided, organization administrator credentials will be used
If no user credentials neither organization admin credentials are provided, credentials from pillar are used

....
login: user id to look for
password: password for the user
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: List of user roles assigned
    
=== user_list_users
**(org_admin_user=None, org_admin_password=None)**

Return all Uyuni users visible to the authenticated user.

....
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: all users visible to the authenticated user
    
=== user_remove_assigned_system_groups
**(login, server_group_names, set_default=False, org_admin_user=None, org_admin_password=None)**

Remove system groups from a user's list of assigned system groups.

....
login: user id to look for
server_group_names: systems groups to remove from list of assigned system groups
set_default: whether the system groups should also be removed from the user's list of default system groups.
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success
    
=== user_remove_role
**(login, role, org_admin_user=None, org_admin_password=None)**

Remove a role from an Uyuni user.

....
login: user id to look for
role: role to be removed from the user
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success
    
=== user_set_details
**(login, password, email, first_name=None, last_name=None, org_admin_user=None, org_admin_password=None)**

Update an Uyuni user.

....
login: user id to look for
password: password for the user
email: user email address
first_name: user first name
last_name: user last name
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_get_details
**(id, org_admin_user=None, org_admin_password=None)**

Get details of an Uyuni Activation Key

....
id: the Activation Key ID
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: Activation Key information

=== activation_key_delete
**(id, org_admin_user=None, org_admin_password=None)**

Deletes an Uyuni Activation Key

....
id: the Activation Key ID
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_create
**(key, description, base_channel_label='', usage_limit=0, system_types=[], universal_default=False, org_admin_user=None, org_admin_password=None)**

Creates an Uyuni Activation Key

....
key: activation key name
description: activation key description
base_channel_label: base channel to be used
usage_limit: activation key usage limit. Default value is 0, which means unlimited usage
system_types: system types to be assigned.
              Can be one of: 'virtualization_host', 'container_build_host',
              'monitoring_entitled', 'osimage_build_host'
universal_default: sets this activation key as organization universal default
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_set_details
**(key, description=None, contact_method=None, base_channel_label=None, usage_limit=None, universal_default=False, org_admin_user=None, org_admin_password=None)**

Updates an Uyuni Activation Key

....
key: activation key name
description: activation key description
base_channel_label: base channel to be used
contact_method: contact method to be used. Can be one of: 'default', 'ssh-push' or 'ssh-push-tunnel'
usage_limit: activation key usage limit. Default value is 0, which means unlimited usage
universal_default: sets this activation key as organization universal default
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_add_entitlements
**(key, system_types, org_admin_user=None, org_admin_password=None)**

Add a list of entitlements to an activation key.

....
key: activation key name
system_types: list of system types to be added
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_remove_entitlements
**(key, system_types, org_admin_user=None, org_admin_password=None)**

Remove a list of entitlements from an activation key.

....
key: activation key name
system_types: list of system types to be removed
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_add_child_channels
**(key, child_channels, org_admin_user=None, org_admin_password=None)**

Add child channels to an activation key.

....
key: activation key name
child_channels: List of child channels to be added
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_remove_child_channels
**(key, child_channels, org_admin_user=None, org_admin_password=None)**

Remove child channels from an activation key.

....
key: activation key name
child_channels: List of child channels to be removed
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_check_config_deployment
**(key, org_admin_user=None, org_admin_password=None)**

Return the status of the 'configure_after_registration' flag for an Activation Key.

....
key: activation key name
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, true if enabled, false if disabled

=== activation_key_enable_config_deployment
**(key, org_admin_user=None, org_admin_password=None)**

Enables the 'configure_after_registration' flag for an Activation Key.

....
key: activation key name
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_disable_config_deployment
**(key, org_admin_user=None, org_admin_password=None)**

Disables the 'configure_after_registration' flag for an Activation Key.

....
key: activation key name
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_add_packages
**(key, packages, org_admin_user=None, org_admin_password=None)**

Add a list of packages to an activation key.

....
key: activation key name
packages: list of packages to be added
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_remove_packages
**(key, packages, org_admin_user=None, org_admin_password=None)**

Remove a list of packages from an activation key.

....
key: activation key name
packages: list of packages to be removed
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_add_server_groups
**(key, server_groups, org_admin_user=None, org_admin_password=None)**

Add a list of server groups to an activation key.

....
key: activation key name
server_groups: list of server groups to be added
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_remove_server_groups
**(key, server_groups, org_admin_user=None, org_admin_password=None)**

Remove a list of server groups from an activation key.

....
key: activation key name
server_groups: list of server groups to be removed
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success

=== activation_key_list_config_channels
**(key, org_admin_user=None, org_admin_password=None)**

List configuration channels associated to an activation key.

....
key: activation key name
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: List of configuration channels

=== activation_key_set_config_channels
**(keys, config_channel_label, org_admin_user=None, org_admin_password=None)**

Replace the existing set of configuration channels on the given activation keys.
Channels are ranked by their order in the array.

....
keys: list of activation key names
config_channel_label: list of configuration channels labels
org_admin_user: organization admin username
org_admin_password: organization admin password
....

    return: boolean, True indicates success
   070701000000B8000081B400000000000000000000000160C1E96E00001585000000000000000000000000000000000000003A00000000susemanager-sls/src/doc/uyuni_config_state_module_doc.txt === group_absent
**(name, org_admin_user=None, org_admin_password=None)**

Ensure an Uyuni system group is not present

....
name: Group Name
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....

    return: dict for Salt communication
    
=== group_present
**(name, description, target=None, target_type='glob', org_admin_user=None, org_admin_password=None)**

Create or update an Uyuni system group

....
name: group name
description: group description
target: target expression used to filter which minions should be part of the group
target_type: target type, one of the following: glob, grain, grain_pcre, pillar, pillar_pcre,
            pillar_exact, compound, compound_pillar_exact. Default: glob.
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....

    return: dict for Salt communication
    
=== org_absent
**(name, admin_user=None, admin_password=None)**

Ensure an Uyuni organization is not present
Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

....
name: organization name
admin_user: uyuni admin user
admin_password: uyuni admin password
....

    return: dict for Salt communication
    
=== org_present
**(name, org_admin_user, org_admin_password, first_name, last_name, email, pam=False, admin_user=None, admin_password=None)**

Create or update an Uyuni organization
Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

....
name: organization name
org_admin_user: organization admin user
org_admin_password: organization admin password
first_name: organization admin first name
last_name: organization admin last name
email: organization admin email
pam: organization admin pam authentication
admin_user: uyuni admin user
admin_password: uyuni admin password
....

    return: dict for Salt communication
    
=== org_trust
**(name, org_name, trusts, admin_user=None, admin_password=None)**

Establish trust relationships between Uyuni organizations.

....
name: state name
org_name: Organization name
trusts: list of organization names to trust
admin_user: administrator username
admin_password: administrator password
....

    return: dict for Salt communication
    
=== user_absent
**(name, org_admin_user=None, org_admin_password=None)**

Ensure an Uyuni user is not present.

....
name: user login name
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....

    return:  dict for Salt communication
    
=== user_channels
**(name, password, manageable_channels=[], subscribable_channels=[], org_admin_user=None, org_admin_password=None)**

Ensure a user has access to the specified channels

....
name: user login name
password: user password
manageable_channels: channels user can manage
subscribable_channels: channels user can subscribe
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....

    return: dict for Salt communication
    
=== user_present
**(name, password, email, first_name, last_name, use_pam_auth=False, roles=None, system_groups=None, org_admin_user=None, org_admin_password=None)**

Create or update an Uyuni user

....
name: user login name
password: desired password for the user
email: valid email address
first_name: First name
last_name: Last name
use_pam_auth: if you wish to use PAM authentication for this user
roles: roles to assign to user
system_groups: system_groups to assign to user
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....

    return: dict for Salt communication

=== activation_key_present
**(name, description, base_channel='', usage_limit=0, contact_method='default', system_types=[],
   universal_default=False, child_channels=[], configuration_channels=[], packages=[],
   server_groups=[], configure_after_registration=False, org_admin_user=None, org_admin_password=None)**

Ensure an Uyuni Activation Key is present.

....
name: the Activation Key name
description: the Activation description
base_channel: base channel to be used
usage_limit: activation key usage limit. Default value is 0, which means unlimited usage
contact_method: contact method to be used. Can be one of: 'default', 'ssh-push' or 'ssh-push-tunnel'
system_types: system types to be assigned.
              Can be one of: 'virtualization_host', 'container_build_host',
              'monitoring_entitled', 'osimage_build_host'
universal_default: sets this activation key as organization universal default
child_channels: list of child channels to be assigned
configuration_channels: list of configuration channels to be assigned
packages: list of packages which will be installed
server_groups: list of server groups to assign the activation key with
configure_after_registration: deploy configuration files to systems on registration
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....

    return: dict for Salt communication

=== activation_key_absent
**(name, org_admin_user=None, org_admin_password=None)**

Ensure an Uyuni Activation Key is not present.

....
name: the Activation Key name
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....

    return: dict for Salt communication
   070701000000B9000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001D00000000susemanager-sls/src/examples  070701000000BA000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000002200000000susemanager-sls/src/examples/ldap 070701000000BB000081B400000000000000000000000160C1E96E0000020B000000000000000000000000000000000000003300000000susemanager-sls/src/examples/ldap/pillar_ldap.yaml    ldap-roles:
  server:    ldap.example.com
  port:      389
  anonymous: true
  mode:      map
  dn:        ou=permissions,dc=example,dc=com
  filter:    '(objectclass=groupOfNames)'
  attrs:
    - cn
    - dn

ldap-users:
  server:    ldap.example.com
  port:      389
  anonymous: true
  mode:      map
  dn:        ou=people,dc=example,dc=com
  filter:    '(objectclass=person)'
  attrs:
    - givenName
    - sn
    - mail
    - uid
    - ou
    - dn
  lists:
    - memberOf

search_order:
  - ldap-roles
  - ldap-users
 070701000000BC000081B400000000000000000000000160C1E96E000000BC000000000000000000000000000000000000003300000000susemanager-sls/src/examples/ldap/pillar_orgs.yaml    uyuni:
  orgs:
    - org_id: MY-ORG
      org_admin_user: org_admin_user
      org_admin_password: org_admin_pass
      first_name: admin
      last_name: admin
      email: admin@org.com
070701000000BD000081B400000000000000000000000160C1E96E00000620000000000000000000000000000000000000003700000000susemanager-sls/src/examples/ldap/uyuni_users_ldap.sls    
## Create organizations based on static pillar data
{% set org_auth = {} %}

{% for org in pillar.get('uyuni', {}).get('orgs', []) %}
{{org['org_id']}}:
  uyuni.org_present:
    - name: {{org['org_id']}}
    - org_admin_user: {{org['org_admin_user']}}
    - org_admin_password: {{org['org_admin_password']}}
    - first_name: {{org['first_name']}}
    - last_name: {{org['last_name']}}
    - email: {{org['email']}}
{% set _ = org_auth.update({org.org_id: {'org_admin_user': org.org_admin_user,  'org_admin_password': org.org_admin_password }}) %}
{% endfor %}

## load available roles to local map variable
## those were extracted from ldap to pillar
{% set roles_map = {} %}
{% for role in pillar.get('ldap-roles', []) %}
{% set _ = roles_map.update({role.dn: role.cn}) %}
{% endfor %}

{% for user in pillar.get('ldap-users', []) %}

  {% set admin_user = None %}
  {% set admin_password = None %}
  {% if org_auth[user['ou']] %}
    {% set admin_user = org_auth[user['ou']].org_admin_user %}
    {% set admin_password = org_auth[user['ou']].org_admin_password %}
  {% endif %}

{{user['uid']}}:
  uyuni.user_present:
    - name: {{user['uid']}}
    - password: 'dummy_local_pass'
    - email: {{user['mail']}}
    - first_name: {{user['givenName']}}
    - last_name: {{user['sn']}}
    - use_pam_auth: true
    - org_admin_user: {{admin_user}}
    - org_admin_password: {{admin_password}}
    {% if user['memberOf'] %}
    - roles:
      {% for user_role in user['memberOf'] %}
      - {{ roles_map[user_role] }}
      {% endfor %}
    {% endif %}

{% endfor %}
070701000000BE000081B400000000000000000000000160C1E96E000007BA000000000000000000000000000000000000003700000000susemanager-sls/src/examples/uyuni_config_hardcode.sls    ## manage orgs
my_org:
  uyuni.org_present:
    - name: my_org
    - org_admin_user: my_org_user
    - org_admin_password: my_org_user
    - first_name: first_name
    - last_name: last_name__
    - email: my_org_user@org.com
    - admin_user: admin
    - admin_password: admin

org_trust_present:
  uyuni.org_trust:
    - org_name: SUSE
    - trusts:
      - my_org

# manage system groups
system_group_httpd:
  uyuni.group_present:
    - name: httpd_servers
    - description: httpd_servers
    - target: "*httpd*"
    - org_admin_user: my_org_user
    - org_admin_password: my_org_user

# manage users
user_1:
  uyuni.user_present:
    - name: user1
    - password: user1
    - email: user1@test.com
    - first_name: first
    - last_name: last
    - org_admin_user: my_org_user
    - org_admin_password: my_org_user
    - roles: ["system_group_admin", "channel_admin"]
    - system_groups:
      - httpd_servers

user_1_channels:
  ## rename it to user_channels (without _present)
  uyuni.user_channels:
    - name: user1
    - password: user1
    - org_admin_user: my_org_user
    - org_admin_password: my_org_user
    - manageable_channels:
      - my_local_channel
    - subscribable_channels:
      - new_local

define_custom_activation_key:
    uyuni.activation_key_present:
        - name: my-suse
        - description: "My Activation Key created via Salt"
        - org_admin_user: my_org_user
        - org_admin_password: my_org_user
        - base_channel: sle-product-sles15-sp2-pool-x86_64
        - child_channels:
            - sle-module-server-applications15-sp2-pool-x86_64
            - sle-module-server-applications15-sp2-updates-x86_64
        - configuration_channels:
            - firewall
        - packages:
            - name: emacs
              arch: x86_64
        - server_groups:
            - httpd_servers
        - usage_limit: 10
        - system_types:
            - virtualization_host
        - configure_after_registration: true
  070701000000BF000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001B00000000susemanager-sls/src/grains    070701000000C0000081B400000000000000000000000160C1E96E00000000000000000000000000000000000000000000002700000000susemanager-sls/src/grains/__init__.py    070701000000C1000081B400000000000000000000000160C1E96E00000F05000000000000000000000000000000000000002600000000susemanager-sls/src/grains/cpuinfo.py import logging
import salt.modules.cmdmod
import salt.utils
import os
import re
try:
    from salt.utils.path import which_bin as _which_bin
except ImportError:
    from salt.utils import which_bin as _which_bin

__salt__ = {
    'cmd.run_all': salt.modules.cmdmod.run_all,
}

log = logging.getLogger(__name__)


def _lscpu(feedback):
    '''
    Determine the CPU socket count using the "lscpu" utility.

    :param feedback: list collecting human-readable failure reasons;
                     appended to in place when this method fails
    :return: {'cpusockets': <count>} on success, otherwise None
    '''
    lscpu = _which_bin(['lscpu'])
    if lscpu is not None:
        try:
            log.debug("Trying lscpu to get CPU socket count")
            ret = __salt__['cmd.run_all']('{0} -p'.format(lscpu), output_loglevel='quiet')
            if ret['retcode'] == 0:
                max_socket_index = -1
                for line in ret['stdout'].strip().splitlines():
                    # lscpu -p prints '#'-prefixed comment lines before the data
                    if line.startswith('#'):
                        continue
                    # parsable output is CSV: CPU,Core,Socket,...
                    socket_index = int(line.split(',')[2])
                    if socket_index > max_socket_index:
                        max_socket_index = socket_index
                if max_socket_index > -1:
                    # socket indexes are zero-based
                    return {'cpusockets': (1 + max_socket_index)}
        except Exception as error:
            feedback.append("lscpu: {0}".format(str(error)))
            log.debug(str(error))
    else:
        # report the missing binary, consistent with _dmidecode()
        feedback.append("lscpu: executable not found")


def _parse_cpuinfo(feedback):
    '''
    Determine the CPU socket count by parsing /proc/cpuinfo
    (counting distinct "physical id" values).

    :param feedback: list collecting human-readable failure reasons;
                     appended to in place when this method fails
    :return: {'cpusockets': <count>} on success, otherwise None
    '''
    physids = set()
    if os.access("/proc/cpuinfo", os.R_OK):
        try:
            log.debug("Trying /proc/cpuinfo to get CPU socket count")
            with open('/proc/cpuinfo') as handle:
                for line in handle.readlines():
                    if line.strip().startswith('physical id'):
                        comps = line.split(':')
                        # skip malformed "physical id" lines
                        if len(comps) < 2 or len(comps[1]) < 2:
                            continue
                        physids.add(comps[1].strip())
            if physids:
                return {'cpusockets': len(physids)}
        except Exception as error:
            log.debug(str(error))
            feedback.append("/proc/cpuinfo: {0}".format(str(error)))
        else:
            # reached only when parsing succeeded but found no physical ids
            feedback.append('/proc/cpuinfo: format is not applicable')
    else:
        # report the unreadable file, consistent with the other probes
        feedback.append('/proc/cpuinfo: not readable')


def _dmidecode(feedback):
    '''
    Determine the CPU socket count using the "dmidecode" utility
    (counting "Processor Information" records).

    :param feedback: list collecting human-readable failure reasons;
                     appended to in place when this method fails
    :return: {'cpusockets': <count>} on success, otherwise None
    '''
    dmidecode = _which_bin(['dmidecode'])
    if dmidecode is None:
        feedback.append("dmidecode: executable not found")
        return
    try:
        log.debug("Trying dmidecode to get CPU socket count")
        ret = __salt__['cmd.run_all']("{0} -t processor".format(dmidecode), output_loglevel='quiet')
        if ret['retcode'] == 0:
            # each physical socket shows up as one "Processor Information" record
            count = sum(1 for line in ret['stdout'].strip().splitlines()
                        if 'Processor Information' in line)
            if count:
                return {'cpusockets': count}
    except Exception as error:
        log.debug(str(error))
        feedback.append("dmidecode: {0}".format(str(error)))


def cpusockets():
    """
    Return the number of CPU sockets as a grain.

    Tries lscpu, /proc/cpuinfo and dmidecode in that order; the first
    probe that returns a result wins.

    :return: {'cpusockets': <count>}, or None when no method worked
    """
    feedback = list()
    grains = _lscpu(feedback) or _parse_cpuinfo(feedback) or _dmidecode(feedback)
    if not grains:
        # log.warn is deprecated; use log.warning with lazy formatting
        log.warning("Could not determine CPU socket count: %s", ' '.join(feedback))

    return grains


def total_num_cpus():
    """ returns the total number of CPU in system.
    /proc/cpuinfo shows the number of active CPUs
    On s390x this can be different from the number of present CPUs in a system
    See IBM redbook: "Using z/VM for Test and Development Environments: A Roundup" chapter 3.5
    """
    sysdev = '/sys/devices/system/cpu/'
    cpu_entry = re.compile(r"^cpu[0-9]+$")
    # each present CPU appears as a cpuN directory under sysfs
    entries = os.listdir(sysdev) if os.path.exists(sysdev) else []
    count = sum(1 for name in entries if cpu_entry.match(name))
    return {'total_num_cpus': count}
   070701000000C2000081B400000000000000000000000160C1E96E00001285000000000000000000000000000000000000002B00000000susemanager-sls/src/grains/public_cloud.py    # -*- coding: utf-8 -*-
'''
Copyright (c) 2019 SUSE LLC

This software is licensed to you under the GNU General Public License,
version 2 (GPLv2). There is NO WARRANTY for this software, express or
implied, including the implied warranties of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
along with this software; if not, see
http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.

This grain module is only loaded in case of a public cloud instance.

Supported Instances: AWS EC2, Azure and Google Compute Engine instances

Returns a grain called "instance_id" containing the virtual instance ID
according to the Public Cloud provider. The data is gathered using the
internal API available from within the instance.

Author: Pablo Suárez Hernández <psuarezhernandez@suse.com>
Based on: https://docs.saltstack.com/en/latest/ref/grains/all/salt.grains.metadata.html
'''
from __future__ import absolute_import, print_function, unicode_literals

# Import python libs
import os
import socket
from multiprocessing.pool import ThreadPool
import logging

# Import salt libs
import salt.utils.http as http

# Internal metadata API information
INTERNAL_API_IP = '169.254.169.254'
HOST = 'http://{0}/'.format(INTERNAL_API_IP)

INSTANCE_ID = None

AMAZON_URL_PATH = 'latest/meta-data/'
AZURE_URL_PATH = 'metadata/instance/compute/'
AZURE_API_ARGS = '?api-version=2017-08-01&format=text'
GOOGLE_URL_PATH = 'computeMetadata/v1/instance/'

log = logging.getLogger(__name__)


def __virtual__():
    '''
    Only load this grain module when the minion runs inside a known
    public cloud (AWS EC2, Azure or GCE).

    Detection: first a cheap TCP probe of the link-local metadata IP,
    then the provider-specific metadata endpoints are queried in
    parallel. On a match the module-global INSTANCE_ID is populated
    with the id reported by that provider's endpoint.

    :return: True when a public cloud provider was detected
    '''
    global INSTANCE_ID
    log.debug("Checking if minion is running in the public cloud")
    # Quick TCP probe: if nothing answers on the metadata IP, this is
    # not a public cloud instance and we can bail out cheaply.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(0.1)
    result = sock.connect_ex((INTERNAL_API_IP, 80))
    if result != 0:
        return False

    def _do_api_request(data):
        # data is a (provider_name, url, headers) tuple
        opts = {
            'http_connect_timeout': 0.1,
            'http_request_timeout': 0.1,
        }
        try:
            ret = {
                data[0]: http.query(data[1],
                                    status=True,
                                    header_dict=data[2],
                                    raise_error=False,
                                    opts=opts)
            }
        except Exception:
            # any failure just means "not this provider"
            ret = {data[0]: dict()}
        return ret

    api_check_dict = [
        ('amazon', os.path.join(HOST, AMAZON_URL_PATH), None),
        ('google', os.path.join(HOST, GOOGLE_URL_PATH), {"Metadata-Flavor": "Google"}),
        ('azure', os.path.join(HOST, AZURE_URL_PATH) + AZURE_API_ARGS, {"Metadata": "true"}),
    ]

    api_ret = {}
    results = []

    try:
        pool = ThreadPool(3)
        results = pool.map(_do_api_request, api_check_dict)
        pool.close()
        pool.join()
    except Exception as exc:
        import traceback
        log.error(traceback.format_exc())
        log.error("Exception while creating a ThreadPool for accessing metadata API: %s", exc)

    for i in results:
        api_ret.update(i)

    # Use .get() with an empty dict fallback: when the ThreadPool failed,
    # api_ret is empty and plain indexing would raise a KeyError here.
    if _is_valid_endpoint(api_ret.get('amazon', {}), 'instance-id'):
        INSTANCE_ID = http.query(os.path.join(HOST, AMAZON_URL_PATH, 'instance-id'), raise_error=False)['body']
        return True
    elif _is_valid_endpoint(api_ret.get('azure', {}), 'vmId'):
        INSTANCE_ID = http.query(os.path.join(HOST, AZURE_URL_PATH, 'vmId') + AZURE_API_ARGS, header_dict={"Metadata": "true"}, raise_error=False)['body']
        return True
    elif _is_valid_endpoint(api_ret.get('google', {}), 'id'):
        INSTANCE_ID = http.query(os.path.join(HOST, GOOGLE_URL_PATH, 'id'), header_dict={"Metadata-Flavor": "Google"}, raise_error=False)['body']
        return True

    return False


def _is_valid_endpoint(response, tag):
    if not response.get('status', 0) == 200:
        return False
    elif not tag in response.get('body', ''):
        return False
    elif ' ' in response.get('body', ''):
        return False
    else:
        return True


def _is_valid_instance_id(id_str):
    if not id_str:
        return False
    if os.linesep in id_str:
        return False
    elif ' ' in id_str:
        return False
    elif len(id_str) > 128:
        return False
    else:
        return True


def instance_id():
    '''
    Return the "instance_id" grain for public cloud instances.

    Relies on the module-global INSTANCE_ID populated by __virtual__().

    :return: {'instance_id': <id>} when a valid id was obtained,
             otherwise an empty dict
    '''
    global INSTANCE_ID
    ret = {}
    if _is_valid_instance_id(INSTANCE_ID):
        # lazy %-formatting avoids building the message when debug is off
        log.debug("This minion is running in the public cloud. Adding instance_id to grains: %s", INSTANCE_ID)
        ret['instance_id'] = INSTANCE_ID
    else:
        # fixed grammar of the original message ("doesn't seems")
        log.error("The obtained public cloud instance id doesn't seem correct: %s", INSTANCE_ID)
        log.error("Skipping")
    return ret

def is_payg_instance():
    '''
    Report whether this looks like a public cloud PAYG (pay-as-you-go)
    instance, detected via the presence of the registercloudguest tool.

    :return: {'is_payg_instance': True} when detected, otherwise {}
    '''
    marker = '/usr/sbin/registercloudguest'
    return {'is_payg_instance': True} if os.path.isfile(marker) else {}
   070701000000C3000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001C00000000susemanager-sls/src/modules   070701000000C4000081B400000000000000000000000160C1E96E00000000000000000000000000000000000000000000002800000000susemanager-sls/src/modules/__init__.py   070701000000C5000081B400000000000000000000000160C1E96E00002C95000000000000000000000000000000000000002900000000susemanager-sls/src/modules/kiwi_info.py  import salt.exceptions
import logging
import os
import re
import hashlib
import pickle

log = logging.getLogger(__name__)

def parse_profile(chroot):
    '''
    Parse the Kiwi image ".profile" file inside the given chroot.

    Only lines of the form NAME='value' are recognized.

    :param chroot: path of the image build chroot
    :return: dict of profile variables (empty when the file is missing)
    '''
    result = {}
    profile_path = os.path.join(chroot, 'image', '.profile')
    if not __salt__['file.file_exists'](profile_path):
        return result
    line_re = re.compile(r"^(?P<name>.*?)='(?P<val>.*)'")
    for entry in __salt__['cp.get_file_str'](profile_path).splitlines():
        found = line_re.match(entry)
        if found:
            result[found.group('name')] = found.group('val')
    return result

def parse_buildinfo(dest):
    '''
    Parse the INI-like "kiwi.buildinfo" file in dest into nested dicts.

    A section header "[name]" opens a new sub-dict; NAME=value lines go
    into the current section (or the top level before any section).

    :param dest: directory containing kiwi.buildinfo
    :return: nested dict (empty when the file is missing)
    '''
    result = {}
    info_path = os.path.join(dest, 'kiwi.buildinfo')
    if not __salt__['file.file_exists'](info_path):
        return result

    section_re = re.compile(r"^\[(?P<name>.*)\]")
    keyval_re = re.compile(r"^(?P<name>.*?)=(?P<val>.*)")

    # values found before any [section] land directly in the top level
    current = result
    for line in __salt__['cp.get_file_str'](info_path).splitlines():
        header = section_re.match(line)
        if header:
            current = {}
            result[header.group('name')] = current

        pair = keyval_re.match(line)
        if pair:
            current[pair.group('name')] = pair.group('val')
    return result

def guess_buildinfo(dest):
    '''
    Fallback for SLES11 Kiwi and for Kiwi NG builds that do not create
    the kiwi.buildinfo file: derive basic build info from the file
    names present in the destination directory.

    :param dest: image build destination directory
    :return: dict shaped like parse_buildinfo() output ('main' section)
    '''
    result = {'main': {}}

    basename_re = re.compile(r"^(?P<basename>.*)\.packages$")
    # legacy Kiwi and Kiwi NG name their PXE initrd/kernel differently
    initrd_patterns = (re.compile(r"^initrd-netboot.*"), re.compile(r".*\.initrd\..*"))
    kernel_patterns = (re.compile(r".*\.kernel\..*"), re.compile(r".*\.kernel$"))

    found_kernel = False
    found_initrd = False
    for entry in __salt__['file.readdir'](dest):
        base = basename_re.match(entry)
        if base:
            result['main']['image.basename'] = base.group('basename')
        if any(rx.match(entry) for rx in initrd_patterns):
            found_initrd = True
        if any(rx.match(entry) for rx in kernel_patterns):
            found_kernel = True

    # both a kernel and an initrd present -> this is a PXE image
    if found_kernel and found_initrd:
        result['main']['image.type'] = 'pxe'
    return result

# Kiwi NG
def parse_kiwi_result(dest):
    '''
    Load build metadata from the pickled "kiwi.result" file (Kiwi NG).

    Unpickling depends on the availability of the python kiwi modules,
    which are not under our control, so there is a certain risk of
    failure; an empty dict is returned in that case and the caller
    should handle all values as optional.

    :param dest: directory containing kiwi.result
    :return: dict with arch/basename/type/filesystem/initrd_system
             keys, possibly empty
    '''
    path = os.path.join(dest, 'kiwi.result')
    ret = {}
    if __salt__['file.file_exists'](path):
        try:
            with open(path, 'rb') as f:
                result = pickle.load(f)
                ret['arch'] = result.xml_state.host_architecture
                ret['basename'] = result.xml_state.xml_data.name
                ret['type'] = result.xml_state.build_type.image
                ret['filesystem'] = result.xml_state.build_type.filesystem
                ret['initrd_system'] = result.xml_state.build_type.initrd_system
        # narrowed from bare "except:", which would also swallow
        # SystemExit/KeyboardInterrupt
        except Exception:
            log.exception("Loading kiwi.result")
            # continue with empty dict
    return ret

def parse_packages(path):
    '''
    Parse a Kiwi ``<image>.packages`` file into a list of package dicts.

    Each line carries pipe-separated fields name|epoch|version|release|arch|
    disturl with an optional trailing |license. '(none)' values are normalized
    to '' and gpg-pubkey pseudo-packages (empty arch) are skipped.
    Returns an empty list when the file does not exist.
    '''
    result = []
    if not __salt__['file.file_exists'](path):
        return result

    pattern = re.compile(r"^(?P<name>.*?)\|(?P<epoch>.*?)\|(?P<version>.*?)\|(?P<release>.*?)\|(?P<arch>.*?)\|(?P<disturl>.*?)(\|(?P<license>.*))?$")
    content = __salt__['cp.get_file_str'](path)
    for line in content.splitlines():
        match = pattern.match(line)
        if not match:
            continue
        # translate '(none)' values to '' (absent optional groups stay None)
        entry = {key: ('' if value == '(none)' else value)
                 for key, value in match.groupdict().items()}
        # gpg-pubkey pseudo-packages carry no architecture -- skip them
        if entry['name'].startswith('gpg-pubkey') and entry['arch'] == '':
            continue
        result.append(entry)
    return result

def get_md5(path):
    '''
    Return ``{'hash': <md5>, 'size': <bytes>}`` for ``path``,
    or an empty dict when the file does not exist.
    '''
    if not __salt__['file.file_exists'](path):
        return {}
    return {
        'hash': __salt__['file.get_hash'](path, form='md5'),
        'size': __salt__['file.stats'](path).get('size'),
    }

def parse_kiwi_md5(path, compressed = False):
    '''
    Parse a Kiwi '.md5' companion file.

    The file holds an md5 hash followed by the size expressed as two factors
    that are multiplied together; when ``compressed`` is True two additional
    factors for the compressed size are expected.

    Returns ``{'hash', 'size'[, 'compressed_size']}``, or an empty dict when
    the file is missing or does not match the expected format.
    '''
    if not __salt__['file.file_exists'](path):
        return {}

    md5_str = __salt__['cp.get_file_str'](path)
    if md5_str is None:
        return {}

    if compressed:
        pattern = re.compile(r"^(?P<md5>[0-9a-f]+)\s+(?P<size1>[0-9]+)\s+(?P<size2>[0-9]+)\s+(?P<csize1>[0-9]+)\s+(?P<csize2>[0-9]+)\s*$")
    else:
        pattern = re.compile(r"^(?P<md5>[0-9a-f]+)\s+(?P<size1>[0-9]+)\s+(?P<size2>[0-9]+)\s*$")

    match = pattern.match(md5_str)
    if not match:
        return {}

    parsed = {
        'hash': match.group('md5'),
        'size': int(match.group('size1')) * int(match.group('size2')),
    }
    if compressed:
        parsed['compressed_size'] = int(match.group('csize1')) * int(match.group('csize2'))
    return parsed

# Known image file suffixes mapped to their compression type.
# image_details() and inspect_boot_image() probe these IN ORDER and take the
# first suffix for which a file exists, so the empty suffix must stay last
# as the uncompressed catch-all.
_compression_types = [
    { 'suffix': '.gz', 'compression': 'gzip' },
    { 'suffix': '.bz', 'compression': 'bzip' },
    { 'suffix': '.xz', 'compression': 'xz' },
    { 'suffix': '.install.iso',    'compression': None },
    { 'suffix': '.iso',            'compression': None },
    { 'suffix': '.raw',            'compression': None },
    { 'suffix': '',    'compression': None }
    ]

def image_details(dest, bundle_dest = None):
    '''
    Collect metadata about a built Kiwi image located in ``dest``.

    Combines the buildinfo file (or a guess from the directory contents) with
    the Kiwi NG 'kiwi.result' data, determines the image file and its
    compression from the known suffixes and merges in hash/size data from the
    '.md5' companion file.

    dest        -- directory with the Kiwi build results
    bundle_dest -- optional directory with the bundled image; when given the
                   result also contains a 'bundle' entry

    Returns a dict with an 'image' key (and optionally 'bundle'), or None
    when the basename cannot be split into name/arch/version.
    '''
    res = {}
    buildinfo = parse_buildinfo(dest) or guess_buildinfo(dest)
    kiwiresult = parse_kiwi_result(dest)

    basename = buildinfo.get('main', {}).get('image.basename', '')
    image_type = kiwiresult.get('type') or buildinfo.get('main', {}).get('image.type', 'unknown')
    fstype = kiwiresult.get('filesystem')

    # basename is expected to look like '<name>.<arch>-<version>'
    pattern = re.compile(r"^(?P<name>.*)\.(?P<arch>.*)-(?P<version>.*)$")
    match = pattern.match(basename)
    if not match:
        return None
    name = match.group('name')
    arch = match.group('arch')
    version = match.group('version')

    filename = None
    filepath = None
    compression = None
    # probe known suffixes in order; the first existing file wins
    for c in _compression_types:
        path = os.path.join(dest, basename + c['suffix'])
        if __salt__['file.file_exists'](path):
            compression = c['compression']
            filename = basename + c['suffix']
            filepath = path
            break

    res['image'] = {
        'basename': basename,
        'name': name,
        'arch': arch,
        'type': image_type,
        'version': version,
        'compression': compression,
        'filename': filename,
        'filepath': filepath,
        'fstype': fstype
    }

    # the .md5 file carries extra size fields when the image is compressed
    res['image'].update(parse_kiwi_md5(os.path.join(dest, basename + '.md5'), compression is not None))

    if bundle_dest is not None:
        # fix: this branch was indented with 2 spaces in a 4-space file
        res['bundle'] = inspect_bundle(bundle_dest, basename)

    return res

def inspect_image(dest, bundle_dest = None):
    '''
    Full inspection of a built Kiwi image: image details, detected
    filesystem, package list and, for PXE images, the boot image
    (kernel/initrd) data.

    Returns None when image_details() cannot identify the image.
    '''
    res = image_details(dest, bundle_dest)
    if not res:
        # fix: was indented with 2 spaces in a 4-space file
        return None

    basename = res['image']['basename']
    image_type = res['image']['type']

    # detect the filesystem from a '<basename>.<fstype>' file or symlink
    for fstype in ['ext2', 'ext3', 'ext4', 'btrfs', 'xfs']:
        path = os.path.join(dest, basename + '.' + fstype)
        if __salt__['file.file_exists'](path) or __salt__['file.is_link'](path):
            res['image']['fstype'] = fstype
            break

    res['packages'] = parse_packages(os.path.join(dest, basename + '.packages'))

    if image_type == 'pxe':
        res['boot_image'] = inspect_boot_image(dest)

    return res


def inspect_boot_image(dest):
    '''
    Find the PXE boot image (kernel + initrd) in ``dest`` and return its
    metadata, or None when no kernel file matches the known naming schemes.

    Both legacy Kiwi ('<basename>.kernel.<kver>.md5') and Kiwi NG
    ('<basename>-<kver>.kernel') file layouts are supported; the result's
    'kiwi_ng' flag records which one matched.
    '''
    # legacy Kiwi: '<name>.<arch>-<version>.kernel.<kernelversion>.md5'
    pattern = re.compile(r"^(?P<name>.*)\.(?P<arch>.*)-(?P<version>.*)\.kernel\.(?P<kernelversion>.*)\.md5$")
    # Kiwi NG: '<name>.<arch>-<version>-<kernelversion>.kernel'
    pattern_kiwi_ng = re.compile(r"^(?P<name>[^-]*)\.(?P<arch>[^-]*)-(?P<version>[^-]*)-(?P<kernelversion>.*)\.kernel$")

    res = None
    files = __salt__['file.readdir'](dest)
    for f in files:
        # the two branches previously duplicated the dict construction;
        # they only differ in the 'kiwi_ng' flag
        for regex, is_kiwi_ng in ((pattern, False), (pattern_kiwi_ng, True)):
            match = regex.match(f)
            if match:
                basename = match.group('name') + '.' + match.group('arch') + '-' + match.group('version')
                res = {
                    'name': match.group('name'),
                    'arch': match.group('arch'),
                    'basename': basename,
                    'initrd': {
                        'version': match.group('version')
                        },
                    'kernel': {
                        'version': match.group('kernelversion')
                        },
                    'kiwi_ng': is_kiwi_ng
                }
                break
        if res is not None:
            break

    if res is None:
        return None

    # locate the initrd among the known compression suffixes
    for c in _compression_types:
        if res['kiwi_ng']:
            path = basename + '.initrd' + c['suffix']
        else:
            path = basename + c['suffix']
        if __salt__['file.file_exists'](os.path.join(dest, path)):
            res['initrd']['filename'] = path
            if res['kiwi_ng']:
                # Kiwi NG: hash the file itself; legacy Kiwi ships a .md5 file
                res['initrd'].update(get_md5(os.path.join(dest, path)))
            else:
                res['initrd'].update(parse_kiwi_md5(os.path.join(dest, basename + '.md5')))
            break

    if res['kiwi_ng']:
        kernel_name = basename + '-' + res['kernel']['version'] + '.kernel'
        path = os.path.join(dest, kernel_name)
        if __salt__['file.file_exists'](path):
            res['kernel']['filename'] = kernel_name
            res['kernel'].update(get_md5(path))
    else:
        kernel_name = basename + '.kernel.' + res['kernel']['version']
        path = os.path.join(dest, kernel_name)
        if __salt__['file.file_exists'](path):
            res['kernel']['filename'] = kernel_name
            res['kernel'].update(parse_kiwi_md5(path + '.md5'))

    return res

def inspect_bundle(dest, basename):
    '''
    Look up the bundled image '<basename>-<id>.<suffix>.sha256' in ``dest``
    and return its metadata (id, suffix, hash, filename, filepath), or None
    when no matching sha256 file exists.
    '''
    pattern = re.compile(r"^(?P<basename>" + re.escape(basename) + r")-(?P<id>[^.]*)\.(?P<suffix>.*)\.sha256$")
    res = None
    sha256_file = None
    for entry in __salt__['file.readdir'](dest):
        found = pattern.match(entry)
        if found:
            res = found.groupdict()
            sha256_file = entry
            break
    if res is None:
        return None

    sha256_str = __salt__['cp.get_file_str'](os.path.join(dest, sha256_file))

    # usual sha256sum format: '<hash>  <filename>'
    with_name = re.compile(r"^(?P<hash>[0-9a-f]+)\s+(?P<filename>.*)\s*$").match(sha256_str)
    if with_name:
        data = with_name.groupdict()
        data['hash'] = 'sha256:{0}'.format(data['hash'])
        res.update(data)
        res['filepath'] = os.path.join(dest, res['filename'])
        return res

    # only hash without file name -- derive the name from the .sha256 file
    hash_only = re.compile(r"^(?P<hash>[0-9a-f]+)$").match(sha256_str)
    if hash_only:
        res['hash'] = 'sha256:{0}'.format(hash_only.groupdict()['hash'])
        res['filename'] = sha256_file[0:-len('.sha256')]
        res['filepath'] = os.path.join(dest, res['filename'])

    return res
   070701000000C6000081B400000000000000000000000160C1E96E00000FEB000000000000000000000000000000000000002B00000000susemanager-sls/src/modules/kiwi_source.py    import salt.exceptions
import logging
import os
from tempfile import mkdtemp
try:
    from urllib.parse import urlparse
except ImportError:
     from urlparse import urlparse

log = logging.getLogger(__name__)

# valid prefixes taken from Docker-CE to be compatible
valid_git_prefixes = ['http://', 'https://', 'git://', 'github.com/', 'git@']
valid_url_prefixes = ['http://', 'https://']
valid_url_suffixes = ['.tar.gz', '.tar.xz', '.tar.bz2', '.tgz', '.tar']

def _isLocal(source):
  # True when `source` is an existing local directory (checked through
  # Salt's file.directory_exists execution module)
  return __salt__['file.directory_exists'](source)

def _isGit(source):
  '''
  Return True when `source` starts with one of the Docker-compatible
  git prefixes.
  '''
  return any(source.startswith(prefix) for prefix in valid_git_prefixes)

def _isTarball(source):
  '''
  Return True when `source` is an http(s) URL ending in one of the known
  tarball suffixes.
  '''
  if not any(source.startswith(prefix) for prefix in valid_url_prefixes):
    return False
  return any(source.endswith(suffix) for suffix in valid_url_suffixes)

def _prepareDestDir(dest):
  '''
  Ensure the target directory does not exist yet; raise a SaltException
  otherwise so an old working directory is never silently reused.
  '''
  if os.path.isdir(dest):
    raise salt.exceptions.SaltException('Working directory "{0}" exists before sources are prepared'.format(dest))

def _prepareLocal(source, dest):
  '''
  Make link from `source` to `dest`
  '''
  log.debug('Source is local directory')
  # refuse to touch an already-existing working directory
  _prepareDestDir(dest)
  __salt__['file.symlink'](source, dest)
  return dest

def _prepareHTTP(source, dest):
  '''
  Download the tarball at `source` and extract it into `dest`.

  Uses the 'file.managed' state to fetch the archive and 'archive.extracted'
  to unpack it; raises SaltException when either state reports failure.
  '''
  log.debug('Source is HTTP')
  _prepareDestDir(dest)

  filename = os.path.join(dest, source.split("/")[-1])
  res = __salt__['state.single']('file.managed', filename, source=source, makedirs=True, skip_verify=True)
  # fix: iterate values() directly; the state id key was an unused variable
  for r in res.values():
    if not r['result']:
      raise salt.exceptions.SaltException(r['comment'])
  res = __salt__['state.single']('archive.extracted', name=dest, source=filename, skip_verify=True, overwrite=True)
  for r in res.values():
    if not r['result']:
      raise salt.exceptions.SaltException(r['comment'])
  return dest

def _prepareGit(source, dest, root):
  '''
  Check out the git `source` into a temporary directory under `root` and
  symlink the requested (sub)tree to `dest`.

  `source` format (docker-compatible):
    [url][#revision[:subdirectory]]
  An omitted or empty revision defaults to the 'master' branch.

  Raises SaltException when the requested subdirectory does not exist in
  the checkout.
  '''
  _prepareDestDir(dest)

  # checkout git into temporary directory in our build root
  # this is needed if we are interested only in git subtree
  tmpdir = __salt__['temp.dir'](parent=root)

  # parse git uri - i.e. git@github.com/repo/#rev:sub
  # compatible with docker as per https://docs.docker.com/engine/reference/commandline/build/#git-repositories
  # fix: use str.partition instead of bare except clauses around tuple
  # unpacking, which also swallowed unrelated errors
  url, _, fragment = source.partition('#')
  rev, sep, subdir = fragment.partition(':')
  if not sep:
    subdir = None

  # omitted rev means default 'master' branch revision
  if rev == '':
    rev = 'master'

  log.debug('GIT URL: {0}, Revision: {1}, subdir: {2}'.format(url, rev, subdir))
  __salt__['git.init'](tmpdir)
  __salt__['git.remote_set'](tmpdir, url)
  __salt__['git.fetch'](tmpdir)
  __salt__['git.checkout'](tmpdir, rev=rev)

  if subdir:
    if _isLocal(os.path.join(tmpdir, subdir)):
      __salt__['file.symlink'](os.path.join(tmpdir, subdir), dest)
    else:
      raise salt.exceptions.SaltException('Directory is not present in checked out source: {}'.format(subdir))
  else:
    __salt__['file.symlink'](tmpdir, dest)
  return dest

def prepare_source(source, root):
  '''
  Prepare source directory based on different source types.

  source -- string with either local directory path, remote http(s) archive or git repository
  root   -- local directory where to store processed source files

  For git repository following format is understood:
    [http[s]://|git://][user@]hostname/repository[#revision[:subdirectory]]
  '''
  dest = os.path.join(root, 'source')
  log.debug('Preparing build source for {0} to {1}'.format(source, dest))
  # order matters: an existing local path wins over URL-looking strings
  if _isLocal(source):
    return _prepareLocal(source, dest)
  if _isTarball(source):
    return _prepareHTTP(source, dest)
  if _isGit(source):
    return _prepareGit(source, dest, root)
  raise salt.exceptions.SaltException('Unknown source format "{0}"'.format(source))
 070701000000C7000081B400000000000000000000000160C1E96E0000041F000000000000000000000000000000000000003000000000susemanager-sls/src/modules/mainframesysinfo.py   # -*- coding: utf-8 -*-
'''
s390 utility for Suse Manager

'''
from __future__ import absolute_import

import logging
import salt.utils
import salt.modules.cmdmod
from salt.exceptions import CommandExecutionError
import os

__salt__ = {
    'cmd.run_all': salt.modules.cmdmod.run_all,
}

log = logging.getLogger(__name__)


def __virtual__():
    '''
    Only works if /usr/bin/read_values is accessible
    '''
    if os.access('/usr/bin/read_values', os.X_OK):
        return True
    return os.access('/proc/sysinfo', os.R_OK)


def read_values():
    '''
    Executes /usr/bin/read_values or if not available
    falls back to 'cat /proc/sysinfo'

    CLI Example:

    .. code-block:: bash

        salt '*' mainframesysinfo.read_values
    '''
    # prefer the s390 tool when executable, otherwise read the kernel's
    # sysinfo interface directly
    if os.access('/usr/bin/read_values', os.X_OK):
        cmd = '/usr/bin/read_values -s'
    else:    
        cmd = 'cat /proc/sysinfo'
    # output_loglevel='quiet' keeps the machine data out of the minion log
    result = __salt__['cmd.run_all'](cmd, output_loglevel='quiet')

    if result['retcode'] != 0:
        raise CommandExecutionError(result['stderr'])

    return result['stdout'] 070701000000C8000081B400000000000000000000000160C1E96E00004891000000000000000000000000000000000000003100000000susemanager-sls/src/modules/mgr_caasp_manager.py  # -*- coding: utf-8 -*-
'''
SUSE Manager CaaSP Cluster Manager module for Salt

'''
from __future__ import absolute_import


import logging
import os
import subprocess

import salt.utils.stringutils
import salt.utils.timed_subprocess

try:
    from salt.utils.path import which
except ImportError:
    from salt.utils import which

from salt.utils.dictupdate import merge_list
from salt.exceptions import CommandExecutionError


log = logging.getLogger(__name__)

__virtualname__ = 'caasp'

DEFAULT_TIMEOUT = 1200


def __virtual__():
    '''
    This module requires that 'skuba' and 'kubectl' CLI tools are available.
    '''
    if which('skuba') and which('kubectl'):
        return __virtualname__
    if not which('skuba'):
        return (False, 'skuba is not available in the minion')
    return (False, 'kubectl is not available in the minion')


def _call_skuba(skuba_cluster_path,
                cmd_args,
                timeout=DEFAULT_TIMEOUT,
                **kwargs):
    '''
    Run 'skuba <cmd_args>' inside the cluster definition directory and return
    the finished TimedProc; raises CommandExecutionError on unexpected errors.
    '''
    log.debug("Calling Skuba CLI: 'skuba {}' - Timeout: {}".format(cmd_args, timeout))
    try:
        proc = salt.utils.timed_subprocess.TimedProc(
            ["skuba"] + cmd_args.split(),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            timeout=timeout,
            cwd=skuba_cluster_path,
        )
        proc.run()
    except Exception as exc:
        error_msg = "Unexpected error while calling skuba: {}".format(exc)
        log.error(error_msg)
        raise CommandExecutionError(error_msg)
    return proc


def _call_kubectl(kubectl_config_path,
                  cmd_args,
                  timeout=DEFAULT_TIMEOUT,
                  **kwargs):
    '''
    Run 'kubectl <cmd_args>' with KUBECONFIG pointing at
    '<kubectl_config_path>/admin.conf' and return the finished TimedProc;
    raises CommandExecutionError on unexpected errors.
    '''
    # fix: copy the environment. 'newenv = os.environ' only aliased it, so
    # setting KUBECONFIG leaked into the whole minion process permanently.
    newenv = os.environ.copy()
    newenv['KUBECONFIG'] = os.path.join(kubectl_config_path, 'admin.conf')

    log.debug("Calling kubectl CLI: 'kubectl {}' - KUBECONFIG: {} - Timeout: {}".format(cmd_args, newenv['KUBECONFIG'], timeout))
    try:
        kubectl_proc = salt.utils.timed_subprocess.TimedProc(
            ["kubectl"] + cmd_args.split(),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            timeout=timeout,
            cwd=kubectl_config_path,
            env=newenv,
        )
        kubectl_proc.run()
        return kubectl_proc
    except Exception as exc:
        error_msg = "Unexpected error while calling kubectl: {}".format(exc)
        log.error(error_msg)
        raise CommandExecutionError(error_msg)


def _sanitize_skuba_output_values(items):
    ret = []
    for i in items:
        if i.lower() == 'no':
            ret.append(False)
        elif i.lower() == 'yes':
            ret.append(True)
        elif i.lower() == '<none>':
            ret.append(None)
        else:
            ret.append(i)
    return ret


def list_nodes(skuba_cluster_path,
               timeout=DEFAULT_TIMEOUT,
               **kwargs):
    '''
    Return a dict mapping node names to their skuba cluster status, enriched
    with 'machine-id' and 'internal-ips' from the Kubernetes API.

    skuba_cluster_path -- directory with the skuba cluster definition
    timeout            -- per-command timeout in seconds

    Raises CommandExecutionError when skuba/kubectl fail or their output
    cannot be parsed.
    '''
    # fix: salt.utils.yaml was used below but never imported by this module;
    # it only worked when another module had imported it first
    import salt.utils.yaml

    skuba_proc = _call_skuba(skuba_cluster_path, "cluster status", timeout=timeout)
    if skuba_proc.process.returncode != 0 or skuba_proc.stderr:
        error_msg = "Unexpected error {} at skuba when listing nodes: {}".format(
                skuba_proc.process.returncode,
                salt.utils.stringutils.to_str(skuba_proc.stderr))
        log.error(error_msg)
        raise CommandExecutionError(error_msg)

    skuba_proc_lines = salt.utils.stringutils.to_str(skuba_proc.stdout).splitlines()

    ret = {}
    try:
        # The first line of skuba output are the headers; columns are
        # separated by runs of at least two spaces
        headers = [x.strip().lower() for x in skuba_proc_lines[0].split('  ') if x]
        name_idx = headers.index('name')
        headers.remove('name')
        for line in skuba_proc_lines[1:]:
            items = [x.strip() for x in line.split('  ') if x]
            node_name = items.pop(name_idx)
            node_zip = zip(headers, _sanitize_skuba_output_values(items))
            ret[node_name] = dict(node_zip)
    except Exception as exc:
        error_msg = "Unexpected error while parsing skuba output: {}".format(exc)
        log.error(error_msg)
        raise CommandExecutionError(error_msg)

    # The following is a hack to enrich skuba result with the machine-id of every node
    # We need to query kubectl to retrieve the machine-id
    kubectl_proc = _call_kubectl(skuba_cluster_path, "get nodes -o json", timeout=timeout)
    if kubectl_proc.process.returncode != 0 or kubectl_proc.stderr:
        error_msg = "Unexpected error {} at kubectl when getting nodes: {}".format(
                kubectl_proc.process.returncode,
                salt.utils.stringutils.to_str(kubectl_proc.stderr))
        log.error(error_msg)
        raise CommandExecutionError(error_msg)

    kubectl_response = salt.utils.yaml.safe_load(kubectl_proc.stdout)

    for node in kubectl_response.get('items', []):
        node_name = node['metadata']['name']
        if node_name in ret:
            ret[node_name]['machine-id'] = node['status']['nodeInfo']['machineID']
            ret[node_name]['internal-ips'] = [addr['address']
                                              for addr in node['status']['addresses']
                                              if addr['type'] == "InternalIP"]
        else:
            # fix: node is a plain dict, so 'node.metadata.name' would have
            # raised AttributeError instead of logging the node name
            error_msg = "Node returned from Kubernetes API not known to skuba: {}".format(node['metadata']['name'])
            log.error(error_msg)

    return ret


def remove_node(skuba_cluster_path,
                node_name,
                drain_timeout=None,
                verbosity=None,
                timeout=DEFAULT_TIMEOUT,
                **kwargs):
    '''
    Remove ``node_name`` from the cluster via 'skuba node remove'.

    Returns a dict with 'stdout', 'stderr', 'success' and 'retcode'.
    '''
    cmd_args = "node remove {}".format(node_name)
    if drain_timeout:
        cmd_args += " --drain-timeout {}".format(drain_timeout)
    if verbosity:
        cmd_args += " --verbosity {}".format(verbosity)

    proc = _call_skuba(skuba_cluster_path, cmd_args, timeout=timeout)
    retcode = proc.process.returncode
    if retcode != 0:
        error_msg = "Unexpected error {} at skuba when removing a node: {}".format(
                retcode,
                salt.utils.stringutils.to_str(proc.stderr))
        log.error(error_msg)

    return {
        'stdout': salt.utils.stringutils.to_str(proc.stdout),
        'stderr': salt.utils.stringutils.to_str(proc.stderr),
        'success': not retcode,
        'retcode': retcode,
    }


def add_node(skuba_cluster_path,
             node_name,
             role,
             target,
             ignore_preflight_errors=None,
             port=None,
             sudo=None,
             user=None,
             verbosity=None,
             timeout=DEFAULT_TIMEOUT,
             **kwargs):
    '''
    Join ``node_name`` to the cluster via 'skuba node join'.

    role   -- cluster role of the new node
    target -- address skuba connects to for provisioning the node

    Returns a dict with 'stdout', 'stderr', 'success' and 'retcode'.
    '''
    cmd_args = "node join --role {} --target {} {}".format(role, target, node_name)
    if ignore_preflight_errors:
        cmd_args += " --ignore-preflight-errors {}".format(ignore_preflight_errors)
    if port:
        cmd_args += " --port {}".format(port)
    if sudo:
        cmd_args += " --sudo"
    if user:
        cmd_args += " --user {}".format(user)
    if verbosity:
        cmd_args += " --verbosity {}".format(verbosity)

    proc = _call_skuba(skuba_cluster_path, cmd_args, timeout=timeout)
    retcode = proc.process.returncode
    if retcode != 0:
        error_msg = "Unexpected error {} at skuba when adding a new node: {}".format(
                retcode,
                salt.utils.stringutils.to_str(proc.stderr))
        log.error(error_msg)

    return {
        'stdout': salt.utils.stringutils.to_str(proc.stdout),
        'stderr': salt.utils.stringutils.to_str(proc.stderr),
        'success': not retcode,
        'retcode': retcode,
    }


def _upgrade_cluster_plan(skuba_cluster_path,
                          verbosity=None,
                          timeout=DEFAULT_TIMEOUT,
                          **kwargs):
    '''
    Show the platform upgrade plan via 'skuba cluster upgrade plan'.

    Returns a dict with 'stdout', 'stderr', 'success' and 'retcode'.
    '''
    cmd_args = "cluster upgrade plan"
    if verbosity:
        cmd_args += " --verbosity {}".format(verbosity)

    proc = _call_skuba(skuba_cluster_path, cmd_args, timeout=timeout)
    retcode = proc.process.returncode
    if retcode != 0:
        error_msg = "Unexpected error {} at skuba when upgrading the cluster: {}".format(
                retcode,
                salt.utils.stringutils.to_str(proc.stderr))
        log.error(error_msg)

    return {
        'stdout': salt.utils.stringutils.to_str(proc.stdout),
        'stderr': salt.utils.stringutils.to_str(proc.stderr),
        'success': not retcode,
        'retcode': retcode,
    }


def upgrade_cluster(skuba_cluster_path,
                    verbosity=None,
                    timeout=DEFAULT_TIMEOUT,
                    plan=False,
                    **kwargs):
    '''
    Upgrade the whole cluster, or only report the plan when plan=True.

    The upgrade runs in three stages: addons, then every node (masters
    first), then addons again. The returned dict carries the overall
    'success'/'retcode' plus per-stage results under the 'stage*' keys.
    '''
    if plan:
        return _upgrade_cluster_plan(skuba_cluster_path=skuba_cluster_path,
                                     verbosity=verbosity,
                                     timeout=timeout,
                                     **kwargs)

    # Perform the cluster upgrade procedure.
    # 1. Upgrade addons
    # 2. Upgrade all nodes
    # 3. Upgrade addons
    ret = {
        'success' : True,
        'retcode' : 0,
        'stage0_upgrade_addons': {},
        'stage1_upgrade_nodes': {},
        'stage2_upgrade_addons': {},
    }

    ret['stage0_upgrade_addons'] = upgrade_addons(skuba_cluster_path=skuba_cluster_path,
                                                  verbosity=verbosity,
                                                  timeout=timeout,
                                                  plan=plan,
                                                  **kwargs)

    # abort early: do not touch the nodes when the addon upgrade failed
    if not ret['stage0_upgrade_addons']['success']:
        ret['success'] = False
        return ret

    nodes = list_nodes(skuba_cluster_path=skuba_cluster_path,
                       timeout=timeout,
                       **kwargs)

    # Ensure master nodes are upgraded first
    for node, _ in sorted(nodes.items(), key=lambda x: 0 if x[1].get('role') == 'master' else 1):
        # the node upgrade targets the node by its internal IP, so a node
        # without one cannot be upgraded and is skipped (marking no failure)
        if not nodes[node]['internal-ips']:
            log.error('No internal-ips defined for node: {}. Cannot proceed upgrading this node!'.format(node))
            continue

        ret['stage1_upgrade_nodes'][node] = upgrade_node(skuba_cluster_path=skuba_cluster_path,
                                                         target=nodes[node]['internal-ips'][0],
                                                         verbosity=verbosity,
                                                         timeout=timeout,
                                                         plan=plan,
                                                         **kwargs)

        # a failed node marks the whole run as failed, but the remaining
        # nodes are still attempted
        if not ret['stage1_upgrade_nodes'][node]['success']:
            ret['success'] = False

    ret['stage2_upgrade_addons'] = upgrade_addons(skuba_cluster_path=skuba_cluster_path,
                                                  verbosity=verbosity,
                                                  timeout=timeout,
                                                  plan=plan,
                                                  **kwargs)

    if not ret['stage2_upgrade_addons']['success']:
        ret['success'] = False

    if not ret['success']:
        ret['retcode'] = 1

    return ret


def upgrade_addons(skuba_cluster_path,
                   verbosity=None,
                   timeout=DEFAULT_TIMEOUT,
                   plan=False,
                   **kwargs):
    '''
    Upgrade the cluster addons ('addon upgrade apply'), or only show the
    plan ('addon upgrade plan' when plan=True).

    Returns a dict with 'stdout', 'stderr', 'success' and 'retcode'.
    '''
    subcommand = "plan" if plan else "apply"
    cmd_args = "addon upgrade {}".format(subcommand)
    if verbosity:
        cmd_args += " --verbosity {}".format(verbosity)

    proc = _call_skuba(skuba_cluster_path, cmd_args, timeout=timeout)
    retcode = proc.process.returncode
    if retcode != 0:
        error_msg = "Unexpected error {} at skuba when upgrading addons: {}".format(
                retcode,
                salt.utils.stringutils.to_str(proc.stderr))
        log.error(error_msg)

    return {
        'stdout': salt.utils.stringutils.to_str(proc.stdout),
        'stderr': salt.utils.stringutils.to_str(proc.stderr),
        'success': not retcode,
        'retcode': retcode,
    }


def upgrade_node(skuba_cluster_path,
                 node_name=None,
                 target=None,
                 port=None,
                 sudo=None,
                 user=None,
                 verbosity=None,
                 timeout=DEFAULT_TIMEOUT,
                 plan=False,
                 **kwargs):
    '''
    Upgrade a single node ('node upgrade apply') or show its upgrade plan
    ('node upgrade plan' when plan=True).

    plan=True requires node_name; plan=False requires target (the address
    skuba connects to).

    Returns a dict with 'stdout', 'stderr', 'success' and 'retcode'.
    '''
    if plan and not node_name:
        error_msg = "The 'node_name' argument is required if plan=True"
        log.error(error_msg)
        raise CommandExecutionError(error_msg)
    if not plan and not target:
        error_msg = "The 'target' argument is required without plan=True"
        log.error(error_msg)
        raise CommandExecutionError(error_msg)

    if plan:
        cmd_args = "node upgrade plan {}".format(node_name)
    else:
        cmd_args = "node upgrade apply --target {}".format(target)
    if port:
        cmd_args += " --port {}".format(port)
    if sudo:
        cmd_args += " --sudo"
    if user:
        cmd_args += " --user {}".format(user)
    if verbosity:
        cmd_args += " --verbosity {}".format(verbosity)

    proc = _call_skuba(skuba_cluster_path, cmd_args, timeout=timeout)
    retcode = proc.process.returncode
    if retcode != 0:
        error_msg = "Unexpected error {} at skuba when upgrading node: {}".format(
                retcode,
                salt.utils.stringutils.to_str(proc.stderr))
        log.error(error_msg)

    return {
        'stdout': salt.utils.stringutils.to_str(proc.stdout),
        'stderr': salt.utils.stringutils.to_str(proc.stderr),
        'success': not retcode,
        'retcode': retcode,
    }


def cluster_init(cluster_name,
                 cluster_basedir,
                 target,
                 cloud_provider=None,
                 strict_capability_defaults=False,
                 verbosity=None,
                 timeout=DEFAULT_TIMEOUT,
                 **kwargs):
    '''
    Create the cluster definition directory via 'skuba cluster init'.

    target -- address of the control plane (node or load balancer)

    Returns a dict with 'stdout', 'stderr', 'success' and 'retcode'.
    '''
    cmd_args = "cluster init --control-plane {} {}".format(target, cluster_name)
    if cloud_provider:
        cmd_args += " --cloud-provider {}".format(cloud_provider)
    if strict_capability_defaults:
        cmd_args += " --strict-capability-defaults"
    if verbosity:
        cmd_args += " --verbosity {}".format(verbosity)

    proc = _call_skuba(cluster_basedir, cmd_args, timeout=timeout)
    retcode = proc.process.returncode
    if retcode != 0:
        error_msg = "Unexpected error {} at skuba when initializing the cluster: {}".format(
                retcode,
                salt.utils.stringutils.to_str(proc.stderr))
        log.error(error_msg)

    return {
        'stdout': salt.utils.stringutils.to_str(proc.stdout),
        'stderr': salt.utils.stringutils.to_str(proc.stderr),
        'success': not retcode,
        'retcode': retcode,
    }


def master_bootstrap(node_name,
                     skuba_cluster_path,
                     target,
                     ignore_preflight_errors=None,
                     port=None,
                     sudo=None,
                     user=None,
                     verbosity=None,
                     timeout=DEFAULT_TIMEOUT,
                     **kwargs):
    '''
    Bootstrap the first master node via 'skuba node bootstrap'.

    target -- address skuba connects to for provisioning the node

    Returns a dict with 'stdout', 'stderr', 'success' and 'retcode'.
    '''
    cmd_args = "node bootstrap --target {} {}".format(target, node_name)
    if ignore_preflight_errors:
        cmd_args += " --ignore-preflight-errors {}".format(ignore_preflight_errors)
    if port:
        cmd_args += " --port {}".format(port)
    if sudo:
        cmd_args += " --sudo"
    if user:
        cmd_args += " --user {}".format(user)
    if verbosity:
        cmd_args += " --verbosity {}".format(verbosity)

    proc = _call_skuba(skuba_cluster_path, cmd_args, timeout=timeout)
    retcode = proc.process.returncode
    if retcode != 0:
        error_msg = "Unexpected error {} at skuba when bootstrapping the node: {}".format(
                retcode,
                salt.utils.stringutils.to_str(proc.stderr))
        log.error(error_msg)

    return {
        'stdout': salt.utils.stringutils.to_str(proc.stdout),
        'stderr': salt.utils.stringutils.to_str(proc.stderr),
        'success': not retcode,
        'retcode': retcode,
    }


def _join_return_dicts(ret1, ret2):
    '''
    Merge two skuba result dicts (as produced by cluster_init,
    master_bootstrap, ...) into one.

    merge_list is expected to collect values present in both dicts into
    lists (see the handling below); stdout/stderr are concatenated back
    into strings and only the second (latest) 'success'/'retcode' values
    are kept.
    '''
    ret = merge_list(ret1, ret2)

    # Join multiple 'stdout' and 'stderr' outputs
    # after merging the two output dicts
    if isinstance(ret['stdout'], list):
        ret['stdout'] = ''.join(ret['stdout'])
    if isinstance(ret['stderr'], list):
        ret['stderr'] = ''.join(ret['stderr'])

    # We only need the latest 'success' and 'retcode'
    # values after merging the two output dicts.
    ret['success'] = ret['success'][1]
    ret['retcode'] = ret['retcode'][1]

    return ret

def create_cluster(cluster_name,
                   cluster_basedir,
                   first_node_name,
                   target,
                   cloud_provider=None,
                   strict_capability_defaults=False,
                   load_balancer=None,
                   verbosity=None,
                   timeout=DEFAULT_TIMEOUT,
                   **kwargs):
    '''
    Create a new cluster: initialize the cluster definition and bootstrap
    its first master node.

    The control plane endpoint passed to cluster_init is the load balancer
    when one is given, otherwise the first node's address itself.

    Returns the merged result dicts of both steps ('stdout', 'stderr',
    'success', 'retcode').
    '''
    control_plane = load_balancer if load_balancer else target
    init_ret = cluster_init(cluster_name=cluster_name,
                            cluster_basedir=cluster_basedir,
                            target=control_plane,
                            cloud_provider=cloud_provider,
                            strict_capability_defaults=strict_capability_defaults,
                            verbosity=verbosity,
                            timeout=timeout,
                            **kwargs)
    if not init_ret['success']:
        return init_ret

    bootstrap_ret = master_bootstrap(node_name=first_node_name,
                                     skuba_cluster_path=os.path.join(cluster_basedir, cluster_name),
                                     target=target,
                                     verbosity=verbosity,
                                     timeout=timeout,
                                     **kwargs)
    return _join_return_dicts(init_ret, bootstrap_ret)
   070701000000C9000081B400000000000000000000000160C1E96E0000190D000000000000000000000000000000000000002F00000000susemanager-sls/src/modules/mgractionchains.py    # -*- coding: utf-8 -*-
'''
SUSE Manager Action Chains module for Salt

'''
from __future__ import absolute_import

import logging
import os
import sys
import salt.config
import salt.syspaths
import yaml

# Prevent issues due 'salt.utils.fopen' deprecation
try:
    from salt.utils import fopen
except:
    from salt.utils.files import fopen

from salt.exceptions import CommandExecutionError

log = logging.getLogger(__name__)

__virtualname__ = 'mgractionchains'

SALT_ACTIONCHAIN_BASE = 'actionchains'


def __virtual__():
    '''
    Enable this module whenever the 'state.sls' function is available.
    '''
    if 'state.sls' in __salt__:
        return __virtualname__
    return (False, 'state.sls is not available')

def _calculate_sls(actionchain_id, machine_id, chunk):
    '''
    Build the SLS name for one action chain chunk, e.g.
    'actionchains.actionchain_<id>_<machine_id>_<chunk>'.
    '''
    return '{base}.actionchain_{acid}_{mid}_{chunk}'.format(
        base=SALT_ACTIONCHAIN_BASE,
        acid=actionchain_id,
        mid=machine_id,
        chunk=chunk)

def _get_ac_storage_filenamepath():
    '''
    Return the path of the '_mgractionchains.conf' state file, normally
    placed inside the minion's include directory (/etc/salt/minion.d/).
    '''
    # Prefer the configured conf_dir, fall back to the directory of the
    # conf_file, and finally to the compiled-in salt default.
    config_dir = __opts__.get('conf_dir')
    if config_dir is None and 'conf_file' in __opts__:
        config_dir = os.path.dirname(__opts__['conf_file'])
    if config_dir is None:
        config_dir = salt.syspaths.CONFIG_DIR

    default_include = __opts__.get(
        'default_include',
        salt.config.DEFAULT_MINION_OPTS['default_include'])
    include_dir = os.path.join(config_dir, os.path.dirname(default_include))

    return os.path.join(include_dir, '_mgractionchains.conf')

def _read_next_ac_chunk(clear=True):
    '''
    Read the persisted next action chain chunk from '_mgractionchains.conf'.

    :param clear: remove the file after a successful read (default True)
    :return: the parsed YAML content, or None if the file does not exist
    :raises CommandExecutionError: if the file cannot be read or parsed
    '''
    f_storage_filename = _get_ac_storage_filenamepath()
    if not os.path.isfile(f_storage_filename):
        return None
    try:
        with fopen(f_storage_filename, "r") as f_storage:
            # safe_load: the file only contains plain data written by
            # _persist_next_ac_chunk via yaml.dump, and yaml.load without
            # an explicit Loader is unsafe and deprecated (PyYAML >= 5.1).
            ret = yaml.safe_load(f_storage.read())
        if clear:
            os.remove(f_storage_filename)
        return ret
    except (IOError, yaml.scanner.ScannerError) as exc:
        err_str = "Error processing YAML from '{0}': {1}".format(f_storage_filename, exc)
        log.error(err_str)
        raise CommandExecutionError(err_str)

def _add_boot_time(next_chunk, prefix):
    '''
    Store the current system boot time (ISO format, from 'status.uptime')
    into ``next_chunk`` under the key '<prefix>_boot_time'.
    '''
    boot_time = __salt__["status.uptime"]()["since_iso"]
    next_chunk["{0}_boot_time".format(prefix)] = boot_time

def _persist_next_ac_chunk(next_chunk):
    '''
    Persist ``next_chunk`` as YAML in '_mgractionchains.conf' so that
    'resume' can pick it up later (e.g. after a reboot).

    :param next_chunk: dict describing the next action chain chunk
    :raises CommandExecutionError: if the file cannot be written
    '''
    _add_boot_time(next_chunk, "persist")
    f_storage_filename = _get_ac_storage_filenamepath()
    try:
        # Fixed: stray trailing semicolon removed.
        f_storage_dir = os.path.dirname(f_storage_filename)
        if not os.path.exists(f_storage_dir):
            os.makedirs(f_storage_dir)
        with fopen(f_storage_filename, "w") as f_storage:
            f_storage.write(yaml.dump(next_chunk))
    # OSError covers os.makedirs failures; yaml.YAMLError covers
    # serialization errors (ScannerError can only happen while parsing,
    # never while dumping).
    except (IOError, OSError, yaml.YAMLError) as exc:
        err_str = "Error writing YAML from '{0}': {1}".format(f_storage_filename, exc)
        log.error(err_str)
        raise CommandExecutionError(err_str)

def start(actionchain_id):
    '''
    Start the execution of the given SUSE Manager Action Chain

    actionchain_id
        The SUSE Manager Actionchain ID to execute on this minion.

    :raises CommandExecutionError: if another action chain is in progress
        or the state execution returns a list of errors

    CLI Example:

    .. code-block:: bash

        salt '*' mgractionchains.start 123
    '''
    # Refuse to start while another chain's state file is still present.
    if os.path.isfile(_get_ac_storage_filenamepath()):
        msg = "Action Chain '{0}' cannot be started. There is already another " \
              "Action Chain being executed. Please check file '{1}'".format(
                actionchain_id, _get_ac_storage_filenamepath())
        log.error(msg)
        raise CommandExecutionError(msg)
    target_sls = _calculate_sls(actionchain_id, __grains__['machine_id'], 1)
    log.debug("Starting execution of SUSE Manager Action Chains ID "
              "'{0}' -> Target SLS: {1}".format(actionchain_id, target_sls))
    try:
        __salt__['saltutil.sync_states']()
        __salt__['saltutil.sync_modules']()
    except Exception as exc:
        # Best-effort sync; fixed: log the actual error instead of
        # silently discarding the caught exception.
        log.error("There was an error while syncing custom states and "
                  "execution modules: {0}".format(exc))
    # queue=True: queue this run if another state run is already in progress.
    ret = __salt__['state.sls'](target_sls, queue=True)
    if isinstance(ret, list):
        # state.sls signals failure by returning a list of error strings
        raise CommandExecutionError(ret)
    return ret

def next(actionchain_id, chunk, next_action_id=None, ssh_extra_filerefs=None):
    '''
    Persist the next Action Chain chunk to be executed by the 'resume' method.

    NOTE: this function intentionally shadows the built-in next(); the name
    is part of the module's public interface.

    next_chunk
        The next target SLS to be executed.

    CLI Example:

    .. code-block:: bash

        salt '*' mgractionchains.next actionchains.actionchain_123_machineid_2
    '''
    sls_name = _calculate_sls(actionchain_id, __grains__['machine_id'], chunk)
    chunk_info = {'next_chunk': sls_name}
    # Only record the optional fields when they carry a value.
    for key, value in (('next_action_id', next_action_id),
                       ('ssh_extra_filerefs', ssh_extra_filerefs)):
        if value:
            chunk_info[key] = value
    _persist_next_ac_chunk(chunk_info)

def get_pending_resume():
    '''
    Get information about any pending action chain chunk execution.

    Returns an empty dict when nothing is pending; otherwise the stored
    chunk dict enriched with the current boot time.
    '''
    pending = _read_next_ac_chunk(False)
    if not pending:
        return {}
    _add_boot_time(pending, "current")
    return pending



def resume():
    '''
    Continue the execution of a SUSE Manager Action Chain.
    This will trigger the execution of the next chunk SLS file stored on '_mgractionchains.conf'

    This method is called by the Salt Reactor as a response to the 'minion/start/event'.

    :return: result of the 'state.sls' run for the next chunk, or an empty
        dict when there is nothing to resume
    :raises CommandExecutionError: if the stored file content is malformed
    '''
    next_chunk = _read_next_ac_chunk()
    if not next_chunk:
        return {}
    # isinstance instead of an exact type() comparison: idiomatic, and
    # tolerant of dict subclasses (e.g. OrderedDict from the YAML parser).
    if not isinstance(next_chunk, dict):
        err_str = "Not able to resume Action Chain execution! Malformed " \
                  "'_mgractionchains.conf' found: {0}".format(next_chunk)
        log.error(err_str)
        raise CommandExecutionError(err_str)
    next_chunk = next_chunk.get('next_chunk')
    log.debug("Resuming execution of SUSE Manager Action Chain -> Target SLS: "
              "{0}".format(next_chunk))
    return __salt__['state.sls'](next_chunk, queue=True)

def clean():
    '''
    Clean execution of an Action Chain by removing '_mgractionchains.conf'.
    '''
    # Reading with clear=True (the default) deletes the stored file.
    _read_next_ac_chunk()
    return {"success": True}
   070701000000CA000081B400000000000000000000000160C1E96E000005F8000000000000000000000000000000000000002B00000000susemanager-sls/src/modules/mgrclusters.py    # -*- coding: utf-8 -*-
'''
SUSE Manager Clusters Management module for Salt

'''
from __future__ import absolute_import

from salt.exceptions import CommandExecutionError
import logging

log = logging.getLogger(__name__)

__virtualname__ = 'mgrclusters'


def __virtual__():
    '''
    Enable this module whenever the 'cmd.run' function is available.
    '''
    if 'cmd.run' in __salt__:
        return __virtualname__
    return (False, 'cmd.run is not available')


def _get_provider_fun(provider_module, fun):
    '''
    Resolve the execution-module function '<provider_module>.<fun>'.

    :raises CommandExecutionError: when no provider module is given or the
        function is not among the loaded execution modules.
    '''
    if not provider_module:
        raise CommandExecutionError("You must specify a valid cluster provider module: {}".format(provider_module))
    fun_key = "{}.{}".format(provider_module, fun)
    if fun_key not in __salt__:
        raise CommandExecutionError("The selected cluster provider cannot be found: {}".format(provider_module))
    return __salt__[fun_key]


def list_nodes(provider_module, params):
    '''
    List the nodes of a cluster through the given provider module.

    :param provider_module: name of the cluster provider execution module
    :param params: dict of keyword arguments forwarded to the provider
    '''
    return _get_provider_fun(provider_module, 'list_nodes')(**params)


def add_node(provider_module, params):
    '''
    Add a node to a cluster through the given provider module.

    :param provider_module: name of the cluster provider execution module
    :param params: dict of keyword arguments forwarded to the provider
    '''
    return _get_provider_fun(provider_module, 'add_node')(**params)


def remove_node(provider_module, params):
    '''
    Remove a node from a cluster through the given provider module.

    :param provider_module: name of the cluster provider execution module
    :param params: dict of keyword arguments forwarded to the provider
    '''
    return _get_provider_fun(provider_module, 'remove_node')(**params)


def upgrade_cluster(provider_module, params):
    '''
    Upgrade a cluster through the given provider module.

    :param provider_module: name of the cluster provider execution module
    :param params: dict of keyword arguments forwarded to the provider
    '''
    return _get_provider_fun(provider_module, 'upgrade_cluster')(**params)


def create_cluster(provider_module, params):
    '''
    Create a cluster through the given provider module.

    :param provider_module: name of the cluster provider execution module
    :param params: dict of keyword arguments forwarded to the provider
    '''
    return _get_provider_fun(provider_module, 'create_cluster')(**params)
070701000000CB000081B400000000000000000000000160C1E96E00000977000000000000000000000000000000000000002900000000susemanager-sls/src/modules/ssh_agent.py  import logging
import subprocess
import salt.utils.timed_subprocess
from salt.exceptions import CommandExecutionError
try:
    from salt.utils.path import which_bin as _which_bin
except ImportError:
    from salt.utils import which_bin as _which_bin

log = logging.getLogger(__name__)

__virtualname__ = 'ssh_agent'

__ssh_agent = '/usr/bin/ssh-agent'
__ssh_add = '/usr/bin/ssh-add'

def __virtual__():
    '''
    Enable this module whenever the 'ssh-agent' binary is available.
    '''
    if _which_bin(['ssh-agent']):
        return __virtualname__
    return (False, 'ssh-agent is not available')

def __call_ssh_tool(ssh_tool, cmd_args = "", **kwargs):
    '''
    Run an ssh tool (ssh-agent/ssh-add) with the given argument string and
    return the finished TimedProc instance.

    :param ssh_tool: absolute path of the binary to execute
    :param cmd_args: arguments as a single whitespace-separated string
    :raises CommandExecutionError: if the process cannot be spawned or
        exits with a non-zero return code
    '''
    # Fixed: the previous message always said "ssh-agent", even when the
    # tool being invoked was ssh-add.
    log.debug("Calling ssh tool: '{} {}'".format(ssh_tool, cmd_args))
    try:
        ssh_tool_proc = salt.utils.timed_subprocess.TimedProc(
            [ssh_tool] + cmd_args.split(),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        ssh_tool_proc.run()
    except Exception as exc:
        error_msg = "Unexpected error while calling {}: {}".format(ssh_tool, exc)
        log.error(error_msg)
        raise CommandExecutionError(error_msg)

    if ssh_tool_proc.process.returncode != 0:
        # NOTE(review): salt.utils.stringutils is used here but never
        # explicitly imported in this module -- confirm it is made
        # available via the 'salt.utils' package import chain.
        error_msg = "Unexpected error {} when calling {} {}: {} {}".format(
                ssh_tool_proc.process.returncode,
                ssh_tool,
                cmd_args,
                salt.utils.stringutils.to_str(ssh_tool_proc.stdout),
                salt.utils.stringutils.to_str(ssh_tool_proc.stderr))
        log.error(error_msg)
        raise CommandExecutionError(error_msg)

    return ssh_tool_proc


def start_agent(**kwargs):
    '''
    Start an ssh-agent process, export its SSH_* variables into the minion
    environment and return them as a dict.
    '''
    result = __call_ssh_tool(__ssh_agent)
    output = salt.utils.stringutils.to_str(result.stdout)

    variables = {}
    for line in output.splitlines():
        # ssh-agent prints lines like 'SSH_AUTH_SOCK=/tmp/...; export ...'
        if not line.startswith('SSH'):
            continue
        assignment = line.split(';')[0]
        key, val = assignment.strip().split("=", 1)
        variables[key] = val

    __salt__['environ.setenv'](variables)
    return variables


def list_keys(**kwargs):
    '''
    Return the output of 'ssh-add -l': the keys held by the running agent.
    '''
    listing = __call_ssh_tool(__ssh_add, "-l")
    return salt.utils.stringutils.to_str(listing.stdout)


def add_key(ssh_key_file, **kwargs):
    '''
    Add the given private key file to the running ssh-agent via 'ssh-add'.
    '''
    __call_ssh_tool(__ssh_add, ssh_key_file)
    return True

def kill(**kwargs):
    '''
    Terminate the running ssh-agent ('ssh-agent -k').
    '''
    __call_ssh_tool(__ssh_agent, "-k")
    return True
 070701000000CC000081B400000000000000000000000160C1E96E00001315000000000000000000000000000000000000002800000000susemanager-sls/src/modules/sumautil.py   # -*- coding: utf-8 -*-
'''
Utility module for Suse Manager

'''
from __future__ import absolute_import

import logging
import os
import re
import socket
import time

import salt.modules.cmdmod
import salt.utils
from salt.exceptions import CommandExecutionError

# Direct mapping so 'cmd.run_all' resolves even without the salt loader.
# NOTE(review): this references salt.modules.cmdmod, which this module does
# not explicitly import (only 'salt.utils') -- the sibling udevdb.py imports
# salt.modules.cmdmod explicitly; confirm the import is guaranteed here.
__salt__ = {
    'cmd.run_all': salt.modules.cmdmod.run_all,
}

log = logging.getLogger(__name__)

__virtualname__ = 'sumautil'

SYSFS_NET_PATH = '/sys/class/net'


def __virtual__():
    '''
    Only run on Linux systems
    '''
    if __grains__['kernel'] == 'Linux':
        return __virtualname__
    return False


def cat(path):
    '''
    Cat the specified file.

    :param path: path of the file to read
    :return: {'retcode': 0, 'stdout': ...} on success,
             {'retcode': 1, 'stderr': ...} on failure

    CLI Example:

    .. code-block:: bash

        salt '*' sumautil.cat /tmp/file
    '''
    # NOTE(review): 'path' is interpolated into a shell command line --
    # confirm callers only pass trusted paths.
    cmd = 'cat %s' % path
    result = __salt__['cmd.run_all'](cmd, output_loglevel='quiet')

    # Fixed: the early return was mis-indented (7 spaces).
    if result['retcode'] != 0:
        return {'retcode': 1, 'stderr': result['stderr']}

    return {'retcode': 0, 'stdout': result['stdout']}


def primary_ips():
    '''
    Get the source IPs that the minion uses to connect to the master.
    Returns the IPv4 and IPv6 address (if available).

    :return: dict mapping 'IPv4'/'IPv6' to the result of 'network.get_route';
        address families that cannot be resolved are omitted

    CLI Example:

    .. code-block:: bash

        salt '*' sumautil.primary_ips
    '''

    # Resolve the master hostname to an address of the requested family.
    get_master_ip = lambda family, host: socket.getaddrinfo(host, 0, family)[0][-1][0]

    master = __opts__.get('master', '')
    log.debug('Using master: {0}'.format(str(master)))

    ret = dict()
    for sock_family, sock_descr in list({socket.AF_INET: 'IPv4', socket.AF_INET6: 'IPv6'}.items()):
        try:
            ret[sock_descr] = __salt__['network.get_route'](get_master_ip(sock_family, master))
            # Fixed: the debug line used the key '<descr> source', which is
            # never stored in 'ret'; that raised a KeyError inside this try
            # block and produced a misleading "not available" message.
            log.debug("network.get_route({0}): ".format(ret[sock_descr]))
        except Exception as err:
            log.debug('{0} is not available? {1}'.format(sock_descr, err))

    return ret


def get_net_module(iface):
    '''
    Returns the kernel module used for the give interface
    or None if the module could not be determined of if the
    interface name is wrong.
    Uses '/sys/class/net' to find out the module.

    CLI Example:

    .. code-block:: bash

        salt '*' sumautil.get_net_module eth0
    '''
    driver_link = os.path.join(SYSFS_NET_PATH, iface, 'device/driver')
    if not os.path.exists(driver_link):
        return None
    # The driver symlink points at the module directory; its basename is
    # the module name.
    return os.path.split(os.readlink(driver_link))[-1] or None


def get_net_modules():
    '''
    Returns a dictionary of all network interfaces and their
    corresponding kernel module (if it could be determined).

    :return: dict of interface name -> module name (or None per interface),
        or None when no interfaces were found

    CLI Example:

    .. code-block:: bash

        salt '*' sumautil.get_net_modules
    '''
    drivers = dict()
    for devdir in os.listdir(SYSFS_NET_PATH):
        try:
            drivers[devdir] = get_net_module(devdir)
        except OSError:
            # Fixed: the exception was previously bound to 'devdir',
            # clobbering the interface name used in the log message;
            # also log.warn is deprecated in favor of log.warning.
            log.warning("An error occurred getting net driver for {0}".format(devdir), exc_info=True)

    return drivers or None

def get_kernel_live_version():
    '''
    Returns the patch version of live patching if it is active,
    otherwise None

    CLI Example:

    .. code-block:: bash

        salt '*' sumautil.get_kernel_live_version
    '''
    live_patch = _klp()
    if not live_patch:
        log.debug("No kernel live patch is active")
    return live_patch

def _klp():
    '''
    klp to identify the current kernel live patch

    Runs the 'klp' tool (or the legacy 'kgr' tool): waits up to ~10s for
    patching to settle, then scans the '-v patches' output for a patch
    with a positive 'active' count.

    :return: {'mgr_kernel_live_version': <patch name>} when an active
        patch is found, otherwise None (implicit)
    '''
    # get 'kgr' for versions prior to SLE 15
    try:
        from salt.utils.path import which_bin as _which_bin
    except:
        from salt.utils import which_bin as _which_bin

    klp = _which_bin(['klp', 'kgr'])
    patchname = None
    if klp is not None:
        try:
            # loop until patching is finished
            for i in range(10):
                stat = __salt__['cmd.run_all']('{0} status'.format(klp), output_loglevel='quiet')
                log.debug("klp status: {0}".format(stat['stdout']))
                if stat['stdout'].strip().splitlines()[0] == 'ready':
                    break
                time.sleep(1)
            # matches lines like '    active: 1' -> captures the count
            re_active = re.compile(r"^\s+active:\s*(\d+)$")
            ret = __salt__['cmd.run_all']('{0} -v patches'.format(klp), output_loglevel='quiet')
            log.debug("klp patches: {0}".format(ret['stdout']))
            if ret['retcode'] == 0:
                for line in ret['stdout'].strip().splitlines():
                    if line.startswith('#'):
                        continue

                    # 'patchname' was set by a preceding header line, so an
                    # 'active' match returns the most recent patch name.
                    match_active = re_active.match(line)
                    if match_active and int(match_active.group(1)) > 0:
                        return {'mgr_kernel_live_version': patchname }
                    elif line.startswith('kgraft') or line.startswith('livepatch'):
                        # kgr patches have prefix 'kgraft', whereas klp patches start with 'livepatch'
                        patchname = line.strip()

        except Exception as error:
            # best-effort: any failure (empty output, command errors) is
            # logged and treated as "no live patch active"
            log.error("klp: {0}".format(str(error)))
   070701000000CD000081B400000000000000000000000160C1E96E00000B37000000000000000000000000000000000000002600000000susemanager-sls/src/modules/udevdb.py # -*- coding: utf-8 -*-
'''
Export udev database

'''
from __future__ import absolute_import

import logging
import salt.utils
import salt.modules.cmdmod
from salt.exceptions import CommandExecutionError
try:
    from salt.utils.path import which_bin as _which_bin
except ImportError:
    from salt.utils import which_bin as _which_bin

__salt__ = {
    'cmd.run_all': salt.modules.cmdmod.run_all,
}

log = logging.getLogger(__name__)


def __virtual__():
    '''
    Only work when udevadm is installed.
    '''
    udevadm = _which_bin(['udevadm'])
    return udevadm is not None


def exportdb():
    '''
    Extract all info delivered by udevadm

    Parses the output of 'udevadm info --export-db' into a list of dicts,
    one per device. Record prefixes (P, N, S, E, ...) become keys; 'E'
    (environment) records become a nested key/value dict with numeric
    values coerced to int/float; other records are lists of strings.

    CLI Example:

    .. code-block:: bash

        salt '*' udevdb.exportdb
    '''

    cmd = 'udevadm info --export-db'
    udev_result = __salt__['cmd.run_all'](cmd, output_loglevel='quiet')

    if udev_result['retcode'] != 0:
        raise CommandExecutionError(udev_result['stderr'])

    devices = []
    dev = {}
    for line in (line.strip() for line in udev_result['stdout'].splitlines()):
        if line:
            # Each record line looks like '<prefix>: <data>'.
            line = line.split(':', 1)
            if len(line) != 2:
                continue
            query, data = line
            if query == 'E':
                if query not in dev:
                    dev[query] = {}
                key, val = data.strip().split('=', 1)

                # Coerce numeric-looking values; leave anything else a str.
                try:
                    val = int(val)
                except ValueError:
                    try:
                        val = float(val)
                    except ValueError:
                        pass  # Quiet, this is not a number.

                dev[query][key] = val
            else:
                if query not in dev:
                    dev[query] = []
                dev[query].append(data.strip())
        else:
            # A blank line terminates the current device record.
            if dev:
                normalize(dev)
                add_scsi_info(dev)
                devices.append(dev)
                dev = {}
    # Flush the trailing record (output may not end with a blank line).
    if dev:
        normalize(dev)
        add_scsi_info(dev)
        devices.append(dev)

    return devices


def normalize(dev):
    '''
    Replace list with only one element to the value of the element.

    :param dev: device dict whose values may be single-element lists
    :return: the same dict, mutated in place
    '''
    for key in list(dev):
        value = dev[key]
        if isinstance(value, list) and len(value) == 1:
            dev[key] = value[0]

    return dev


def add_scsi_info(dev):
    '''
    Add SCSI info from sysfs

    For scsi_device entries, reads '/sys/<P path>/type' and stores the
    value under dev['X-Mgr']['SCSI_SYS_TYPE'].
    '''
    env = dev.get('E') or {}
    if env.get('SUBSYSTEM') != 'scsi' or env.get('DEVTYPE') != 'scsi_device':
        return
    sysfs_path = dev['P']
    scsi_type = __salt__['cmd.run_all']('cat /sys/{0}/type'.format(sysfs_path), output_loglevel='quiet')

    if scsi_type['retcode'] != 0:
        raise CommandExecutionError(scsi_type['stderr'])

    dev['X-Mgr'] = {'SCSI_SYS_TYPE': scsi_type['stdout']}
 070701000000CE000081B400000000000000000000000160C1E96E0000FF96000000000000000000000000000000000000002C00000000susemanager-sls/src/modules/uyuni_config.py   # coding: utf-8
from typing import Any, Dict, List, Optional, Union, Tuple
import ssl
import xmlrpc.client  # type: ignore
import logging

import os
import salt.config
from salt.utils.minions import CkMinions
import datetime

# XML-RPC fault code compared against by RPCClient.__call__ to detect
# authentication/session failures
AUTHENTICATION_ERROR = 2950

log = logging.getLogger(__name__)

# Empty defaults; presumably replaced by the salt loader at runtime --
# NOTE(review): confirm loader injection of these dunders.
__pillar__: Dict[str, Any] = {}
__context__: Dict[str, Any] = {}
__virtualname__: str = "uyuni"


class UyuniUsersException(Exception):
    """
    Uyuni users Exception

    Raised for user-management failures and XML-RPC authentication errors
    (see RPCClient).
    """


class UyuniChannelsException(Exception):
    """
    Uyuni channels Exception
    """


class RPCClient:
    """
    XML-RPC client for the Uyuni API with session-token caching
    (one cached token per user, stored in __context__).
    """

    def __init__(self, user: Optional[str] = None, password: Optional[str] = None,
                 url: str = "https://localhost/rpc/api"):
        """
        XML-RPC client interface.

        :param user: username for the XML-RPC API endpoints
        :param password: password credentials for the XML-RPC API endpoints
        :param url: URL of the remote host
        """

        # Certificate verification is disabled: the API is reached via the
        # local endpoint with a certificate that would not validate.
        ctx: ssl.SSLContext = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        self.conn = xmlrpc.client.ServerProxy(url, context=ctx, use_datetime=True, use_builtin_types=True)
        # Annotate once here; assigning in both branches below previously
        # duplicated the annotations.
        self._user: str
        self._password: str
        if user is None or password is None:
            # if user or password not set, fallback to default user defined on pillar data
            if "xmlrpc" in (__pillar__ or {}).get("uyuni", {}):
                rpc_conf = (__pillar__ or {})["uyuni"]["xmlrpc"] or {}
                self._user = rpc_conf.get("user", "")
                self._password = rpc_conf.get("password", "")
            else:
                raise UyuniUsersException("Unable to find Pillar configuration for Uyuni XML-RPC API")
        else:
            self._user = user
            self._password = password

        self.token: Optional[str] = None

    def get_user(self):
        """
        :return: the API username this client authenticates with
        """
        return self._user

    def get_token(self, refresh: bool = False) -> Optional[str]:
        """
        Authenticate.
        If a authentication token is present on __context__ it will be returned
        Otherwise get a new authentication token from xml rpc.
        If refresh is True, get a new token from the API regardless of prior status.

        :param refresh: force token refresh, discarding any cached value
        :return: authentication token
        """
        if self.token is None or refresh:
            try:
                # One cached token per user, to avoid re-login on each call.
                auth_token_key = "uyuni.auth_token_" + self._user
                if (auth_token_key not in __context__) or refresh:
                    __context__[auth_token_key] = self.conn.auth.login(self._user, self._password)
            except Exception as exc:
                log.error("Unable to login to the Uyuni server: %s", exc)
                raise exc
            self.token = __context__[auth_token_key]
        return self.token

    def __call__(self, method: str, *args, **kwargs) -> Any:
        """
        Invoke an XML-RPC method, prepending the session token, and retry
        once with a fresh token on an authentication fault.

        :param method: dotted XML-RPC method name, e.g. 'user.getDetails'
        :return: the raw XML-RPC response
        """
        self.get_token()
        if self.token is not None:
            try:
                log.debug("Calling RPC method %s", method)
                return getattr(self.conn, method)(*((self.token,) + args))
            except Exception as exc:
                # Fixed: only xmlrpc.client.Fault carries 'faultCode';
                # getattr avoids an AttributeError masking e.g. network
                # errors raised here.
                if getattr(exc, "faultCode", None) != AUTHENTICATION_ERROR:
                    log.error("Unable to call RPC function: %s", str(exc))
                    raise exc
                # Authentication error when using the cached token -- it may
                # have expired. Call a second time with a new session token.
                log.warning("Fall back to the second try due to %s", str(exc))
                try:
                    return getattr(self.conn, method)(*((self.get_token(refresh=True),) + args))
                except Exception as exc:
                    log.error("Unable to call RPC function: %s", str(exc))
                    raise exc

        raise UyuniUsersException("XML-RPC backend authentication error.")


class UyuniRemoteObject:
    """
    Base class for Uyuni XML-RPC wrappers: owns an RPCClient and provides
    response-conversion helpers.
    """

    def __init__(self, user: Optional[str] = None, password: Optional[str] = None):
        # Credentials fall back to pillar data inside RPCClient when None.
        self.client: RPCClient = RPCClient(user=user, password=password)

    @staticmethod
    def _convert_datetime_str(response: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
        """
        modify any key-value pair where value is a datetime object to a string.

        :param response: response dictionary to be processed

        :return: new dictionary with datetime objects converted to string,
            or None when the input is empty/None (fixed annotation: the
            previous one claimed a Dict was always returned)
        """
        if response:
            return dict(
                [
                    (k, "{0}".format(v)) if isinstance(v, datetime.datetime) else (k, v)
                    for k, v in response.items()
                ]
            )
        return None

    @staticmethod
    def _convert_datetime_list(
            response: Optional[List[Dict[str, Any]]]) -> Optional[List[Dict[str, Any]]]:
        """
        modify any list of key-value pair where value is a datetime object to a string.

        :param response: list of dictionaries to be processed

        :return: list of new dictionaries with datetime objects converted to
            string, or None when the input is empty/None
        """
        if response:
            return [UyuniRemoteObject._convert_datetime_str(value) for value in response]
        return None

    @staticmethod
    def _convert_bool_response(response: int) -> bool:
        """
        Convert the API's integer status (1 == success) to a boolean.
        """
        return response == 1

class UyuniUser(UyuniRemoteObject):
    """
    CRUD operation on users.
    """

    def get_details(self, login: str) -> Dict[str, Any]:
        """
        Retrieve details of an Uyuni user.

        :param: login: user name to lookup

        :return: Dictionary with user details
        """
        return self.client("user.getDetails", login)

    def list_users(self) -> List[Dict[str, Any]]:
        """
        Return all Uyuni users visible to the authenticated user.

        :return: all users visible to the authenticated user
        """
        return self.client("user.listUsers")

    def create(self, login: str, password: str, email: str, first_name: str = "", last_name: str = "",
               use_pam_auth: bool = False) -> bool:
        """
        Create an Uyuni user.
        User will be created in the same organization as the authenticated user.

        :param login: desired login name
        :param password: desired password for the user
        :param email: valid email address
        :param first_name: First name
        :param last_name: Last name
        :param use_pam_auth: if you wish to use PAM authentication for this user

        :return: boolean, True indicates success
        """
        # The XML-RPC call expects (login, password, first, last, email, pam)
        return self._convert_bool_response(self.client("user.create", login, password,
                                                       first_name, last_name, email, int(use_pam_auth)))

    def set_details(self, login: str, password: str, email: str, first_name: str = "", last_name: str = "") -> bool:
        """
        Update an Uyuni user information.

        :param login: login name
        :param password: desired password for the user
        :param email: valid email address
        :param first_name: First name
        :param last_name: Last name

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("user.setDetails", login, {
            "password": password,
            "first_name": first_name,
            "last_name": last_name,
            "email": email
        }))

    def delete(self, login: str) -> bool:
        """
        Remove an Uyuni user.

        :param login: login of the user

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("user.delete", login))

    def list_roles(self, login: str) -> List[str]:
        """
        Return the list of roles of a user.

        :param: login: user name to use on lookup

        :return: list of user roles
        """
        return self.client("user.listRoles", login)

    def add_role(self, login: str, role: str) -> bool:
        """
        Add a role to a user

        :param login: login of the user
        :param role: a new role

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("user.addRole", login, role))

    def remove_role(self, login: str, role: str) -> bool:
        """
        Remove a role from a user.

        :param login: login of the user
        :param role: one of uyuni user roles

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("user.removeRole", login, role))

    def list_assigned_system_groups(self, login: str) -> List[Dict[str, Union[int, str]]]:
        """
        Returns the system groups that a user can administer.

        :param login: login of the user

        :return: List of system groups that a user can administer
        """
        return self.client("user.listAssignedSystemGroups", login)

    def add_assigned_system_groups(self, login: str, server_group_names: List[str], set_default: bool = False) -> bool:
        """
        Add system groups to a user's list of assigned system groups.

        :param login: user id to look for
        :param server_group_names: system groups to add
        :param set_default: True if the system groups should also be added to user's default list.

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("user.addAssignedSystemGroups",
                                                       login, server_group_names, set_default))

    def remove_assigned_system_groups(self, login: str, server_group_names: List[str], set_default: bool = False) -> bool:
        """
        Remove system groups from a user's list of assigned system groups

        :param login: user id to look for
        :param server_group_names: systems groups to remove from list of assigned system groups
        :param set_default: True if the system groups should also be removed to user's default list.

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("user.removeAssignedSystemGroups",
                                                       login, server_group_names, set_default))


class UyuniChannel(UyuniRemoteObject):
    """
    Read-only queries on software channels.
    """

    def list_manageable_channels(self) -> List[Dict[str, Union[int, str]]]:
        """
        List all software channels that the user is entitled to manage.

        :return: list of manageable channels
        """
        return self.client("channel.listManageableChannels")

    def list_my_channels(self) -> List[Dict[str, Union[int, str]]]:
        """
        List the channels returned by 'channel.listMyChannels' for the
        authenticated user.

        NOTE(review): the previous docstring was copy-pasted from
        list_manageable_channels -- confirm the exact semantics against
        the Uyuni API docs for channel.listMyChannels.

        :return: list of channels
        """
        return self.client("channel.listMyChannels")


class UyuniChannelSoftware(UyuniRemoteObject):
    """
    Per-user access flags (manage/subscribe) on software channels.
    """

    def set_user_manageable(self, channel_label: str, login: str, access: bool) -> bool:
        """
        Set the manageable flag for a given channel and user.
        If access is set to 'true', this method will give the user manage permissions to the channel.
        Otherwise, that privilege is revoked.

        :param channel_label: label of the channel
        :param login: user login id
        :param access: True if the user should have management access to channel

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("channel.software.setUserManageable",
                                                       channel_label, login, access))

    def set_user_subscribable(self, channel_label: str, login: str, access: bool) -> bool:
        """
        Set the subscribable flag for a given channel and user.
        If value is set to 'true', this method will give the user subscribe permissions to the channel.
        Otherwise, that privilege is revoked.

        :param channel_label: label of the channel
        :param login: user login id
        :param access: True if the user should have subscribe permission to the channel

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("channel.software.setUserSubscribable",
                                                       channel_label, login, access))

    def is_user_manageable(self, channel_label: str, login: str) -> bool:
        """
        Returns whether the channel may be managed by the given user.

        :param channel_label: label of the channel
        :param login: user login id

        :return: boolean which indicates if user can manage channel or not
        """
        return self._convert_bool_response(self.client("channel.software.isUserManageable", channel_label, login))

    def is_user_subscribable(self, channel_label: str, login: str) -> bool:
        """
        Returns whether the channel may be subscribed to by the given user.

        :param channel_label: label of the channel
        :param login: user login id

        :return: boolean which indicates if user subscribe the channel or not
        """
        return self._convert_bool_response(self.client("channel.software.isUserSubscribable", channel_label, login))

    def is_globally_subscribable(self, channel_label: str) -> bool:
        """
        Returns whether the channel is globally subscribable on the organization

        :param channel_label: label of the channel

        :return: boolean which indicates if channel is globally subscribable
        """
        return self._convert_bool_response(self.client("channel.software.isGloballySubscribable", channel_label))


class UyuniOrg(UyuniRemoteObject):
    """
    CRUD operations on organizations
    """

    def list_orgs(self) -> Dict[str, Union[int, str, bool]]:
        """
        List every organization visible to the authenticated user.

        :return: list of all existing organizations
        """
        return self.client("org.listOrgs")

    def get_details(self, name: str) -> Dict[str, Union[int, str, bool]]:
        """
        Look up an organization by its name.

        :param name: organisation name

        :return: organization details
        """
        return self.client("org.getDetails", name)

    def create(self, name: str, org_admin_user: str, org_admin_password: str,
               first_name: str, last_name: str, email: str,
               admin_prefix: str = "Mr.", pam: bool = False) -> Dict[str, Union[str, int, bool]]:
        """
        Create a new Uyuni organization together with its administrator account.

        :param name: organization name
        :param org_admin_user: organization admin user
        :param org_admin_password: organization admin password
        :param first_name: organization admin first name
        :param last_name: organization admin last name
        :param email: organization admin email
        :param admin_prefix: organization admin prefix
        :param pam: organization admin PAM authentication

        :return: dictionary with org information
        """
        # Note: the XMLRPC call expects the prefix before the first/last name.
        return self.client("org.create", name, org_admin_user, org_admin_password,
                           admin_prefix, first_name, last_name, email, pam)

    def delete(self, name: str) -> int:
        """
        Delete an Uyuni organization by name.

        :param name: organization name

        :return: boolean, True indicates success
        """
        # The delete call works on the numeric id, so resolve it first.
        details = self.get_details(name=name)
        org_id = int(details.get("id", -1))
        return self._convert_bool_response(self.client("org.delete", org_id))

    def update_name(self, org_id: int, name: str) -> Dict[str, Union[str, int, bool]]:
        """
        Rename an Uyuni organization.

        :param org_id: organization internal id
        :param name: new organization name

        :return: organization details
        """
        return self.client("org.updateName", org_id, name)


class UyuniOrgTrust(UyuniRemoteObject):
    """
    Manage trust relationships between Uyuni organizations.
    """

    def __init__(self, user: str = None, password: str = None):
        """
        :param user: authentication user
        :param password: authentication user password
        """
        UyuniRemoteObject.__init__(self, user, password)
        # Needed to resolve organization names to their internal ids.
        self._org_manager = UyuniOrg(user, password)

    def list_orgs(self) -> List[Dict[str, Union[str, int]]]:
        """
        List all organizations trusted by the authenticated user organization

        :return: List of organization details
        """
        return self.client("org.trusts.listOrgs")

    def list_trusts(self, org_name: str) -> List[Dict[str, Union[str, int, bool]]]:
        """
        List all trusts for the organization

        :param org_name: name of the organization whose trusts are listed

        :return: list with all organizations and their trust status
        """
        org = self._org_manager.get_details(org_name)
        return self.client("org.trusts.listTrusts", org["id"])

    def add_trust_by_name(self, org_name: str, org_trust: str) -> int:
        """
        Set an organisation as trusted by another

        :param org_name: organization name
        :param org_trust: name of organization to trust

        :return: boolean, True indicates success
        """
        this_org = self._org_manager.get_details(org_name)
        trust_org = self._org_manager.get_details(org_trust)
        return self.add_trust(this_org["id"], trust_org["id"])

    def add_trust(self, org_id: int, org_trust_id: int) -> int:
        """
        Set an organisation as trusted by another

        :param org_id: organization id
        :param org_trust_id: organization id to trust

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("org.trusts.addTrust", org_id, org_trust_id))

    def remove_trust_by_name(self, org_name: str, org_untrust: str) -> int:
        """
        Set an organisation as not trusted by another

        :param org_name: organization name
        :param org_untrust: organization name to untrust

        :return: boolean, True indicates success
        """
        this_org = self._org_manager.get_details(org_name)
        trust_org = self._org_manager.get_details(org_untrust)
        return self.remove_trust(this_org["id"], trust_org["id"])

    def remove_trust(self, org_id: int, org_untrust_id: int) -> int:
        """
        Set an organisation as not trusted by another

        :param org_id: organization id
        :param org_untrust_id: organization id to untrust

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("org.trusts.removeTrust", org_id, org_untrust_id))


class UyuniSystemgroup(UyuniRemoteObject):
    """
    Provides methods to access and modify system groups.
    """

    def list_all_groups(self) -> List[Dict[str, Union[int, str]]]:
        """
        Retrieve a list of system groups that are accessible by the user

        :return: list with group information
        """
        return self.client("systemgroup.listAllGroups")

    def get_details(self, name: str) -> Dict[str, Union[int, str]]:
        """
        Retrieve details of a system group.

        :param name: Name of the system group.

        :return: data of the system group.
        """
        return self.client("systemgroup.getDetails", name)

    def create(self, name: str, description: str) -> Dict[str, Union[int, str]]:
        """
        Create a new system group.

        :param name: Name of the system group.
        :param description: Description of the system group.

        :return: data of the system group.
        """
        return self.client("systemgroup.create", name, description)

    def delete(self, name: str) -> int:
        """
        Delete a system group.

        :param name: Name of the system group.

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("systemgroup.delete", name))

    def update(self, name: str, description: str) -> Dict[str, Union[int, str]]:
        """
        Update an existing system group.

        :param name: Name of the system group.
        :param description: Description of the system group.

        :return: data of the system group.
        """
        return self.client("systemgroup.update", name, description)

    def list_systems(self, name: str, minimal: bool = True) -> List[Dict[str, Any]]:
        """
        Get information about systems in a group.

        :param name: Group name
        :param minimal: default True. Only return minimal information about systems, use False to get more details

        :return: List of system information
        """
        return self._convert_datetime_list(
            self.client("systemgroup.listSystemsMinimal" if minimal else "systemgroup.listSystems", name))

    def add_remove_systems(self, name: str, add_remove: bool, system_ids: List[int] = None) -> int:
        """
        Add or remove systems from a system group

        :param name: Group name
        :param add_remove: True to add to the group, False to remove
        :param system_ids: List of system ids to add or remove (defaults to an empty list)

        :return: boolean, True indicates success
        """
        # Avoid a mutable default argument; None stands in for "no systems".
        return self._convert_bool_response(
            self.client("systemgroup.addOrRemoveSystems", name, system_ids or [], add_remove))


class UyuniSystems(UyuniRemoteObject):

    def get_minion_id_map(self, refresh: bool = False) -> Dict[str, int]:
        """
        Map minion IDs to Uyuni system IDs for all systems the user has access to.
        Results are cached in ``__context__`` to avoid repeated XMLRPC calls.

        :param refresh: when True, bypass the local context cache and fetch fresh data
        :return: Map between minion ID and system ID of all system accessible by authenticated user
        """
        # Cache key is per authenticated user, since visibility differs by user.
        cache_key = "uyuni.minions_id_map_" + self.client.get_user()
        if refresh or cache_key not in __context__:
            __context__[cache_key] = self.client("system.getMinionIdMap")
        return __context__[cache_key]


class UyuniActivationKey(UyuniRemoteObject):
    """
    CRUD operations on Activation Keys.
    """

    def get_details(self, id: str) -> Dict[str, Any]:
        """
        Get details of an Uyuni Activation Key

        :param id: the Activation Key ID

        :return: Activation Key information
        """
        return self.client("activationkey.getDetails", id)

    def delete(self, id: str) -> bool:
        """
        Deletes an Uyuni Activation Key

        :param id: the Activation Key ID

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("activationkey.delete", id))

    def create(self, key: str, description: str,
               base_channel_label: str = '',
               usage_limit: int = 0,
               system_types: List[str] = None,
               universal_default: bool = False) -> bool:
        """
        Creates an Uyuni Activation Key

        :param key: activation key name
        :param description: activation key description
        :param base_channel_label: base channel to be used
        :param usage_limit: activation key usage limit. Default value is 0, which means unlimited usage
        :param system_types: system types to be assigned.
                             Can be one of: 'virtualization_host', 'container_build_host',
                             'monitoring_entitled', 'osimage_build_host'
        :param universal_default: sets this activation key as organization universal default

        :return: boolean, True indicates success
        """
        # system_types defaults to None (not []) to avoid a shared mutable default.
        return self._convert_bool_response(self.client("activationkey.create", key, description, base_channel_label,
                                                       usage_limit, system_types or [], universal_default))

    def set_details(self, key: str,
                    description: str = None,
                    contact_method: str = None,
                    base_channel_label: str = None,
                    usage_limit: int = None,
                    universal_default: bool = False) -> bool:
        """
        Updates an Uyuni Activation Key

        :param key: activation key name
        :param description: activation key description
        :param base_channel_label: base channel to be used
        :param contact_method: contact method to be used. Can be one of: 'default', 'ssh-push' or 'ssh-push-tunnel'
        :param usage_limit: activation key usage limit. Default value is 0, which means unlimited usage
        :param universal_default: sets this activation key as organization universal default

        :return: boolean, True indicates success
        """
        data = {'universal_default': universal_default}
        if description:
            data['description'] = description
        # base_channel_label may legitimately be '' (clear the base channel),
        # so only skip it when it is None.
        if base_channel_label is not None:
            data['base_channel_label'] = base_channel_label
        if contact_method:
            data['contact_method'] = contact_method

        # A falsy usage_limit (None or 0) means unlimited usage.
        if usage_limit:
            data['usage_limit'] = usage_limit
        else:
            data['unlimited_usage_limit'] = True
        return self._convert_bool_response(self.client("activationkey.setDetails", key, data))

    def add_entitlements(self, key: str, system_types: List[str]) -> bool:
        """
        Add a list of entitlements to an activation key.

        :param key: activation key name
        :param system_types: list of system types to be added

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("activationkey.addEntitlements", key, system_types))

    def remove_entitlements(self, key: str, system_types: List[str]) -> bool:
        """
        Remove a list of entitlements from an activation key.

        :param key: activation key name
        :param system_types: list of system types to be removed

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("activationkey.removeEntitlements", key, system_types))

    def add_child_channels(self, key: str, child_channels: List[str]) -> bool:
        """
        Add child channels to an activation key.

        :param key: activation key name
        :param child_channels: List of child channels to be added

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("activationkey.addChildChannels", key, child_channels))

    def remove_child_channels(self, key: str, child_channels: List[str]) -> bool:
        """
        Remove child channels from an activation key.

        :param key: activation key name
        :param child_channels: List of child channels to be removed

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("activationkey.removeChildChannels", key, child_channels))

    def check_config_deployment(self, key: str) -> bool:
        """
        Return the status of the 'configure_after_registration' flag for an Activation Key.

        :param key: activation key name

        :return: boolean, true if enabled, false if disabled
        """
        return self._convert_bool_response(self.client("activationkey.checkConfigDeployment", key))

    def enable_config_deployment(self, key: str) -> bool:
        """
        Enables the 'configure_after_registration' flag for an Activation Key.

        :param key: activation key name

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("activationkey.enableConfigDeployment", key))

    def disable_config_deployment(self, key: str) -> bool:
        """
        Disables the 'configure_after_registration' flag for an Activation Key.

        :param key: activation key name

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("activationkey.disableConfigDeployment", key))

    def add_packages(self, key: str, packages: List[Any]) -> bool:
        """
        Add a list of packages to an activation key.

        :param key: activation key name
        :param packages: list of packages to be added

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("activationkey.addPackages", key, packages))

    def remove_packages(self, key: str, packages: List[Any]) -> bool:
        """
        Remove a list of packages from an activation key.

        :param key: activation key name
        :param packages: list of packages to be removed

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("activationkey.removePackages", key, packages))

    def add_server_groups(self, key: str, server_groups: List[int]) -> bool:
        """
        Add a list of server groups to an activation key.

        :param key: activation key name
        :param server_groups: list of server groups to be added

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("activationkey.addServerGroups", key, server_groups))

    def remove_server_groups(self, key: str, server_groups: List[int]) -> bool:
        """
        Remove a list of server groups from an activation key.

        :param key: activation key name
        :param server_groups: list of server groups to be removed

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("activationkey.removeServerGroups", key, server_groups))

    def list_config_channels(self, key: str) -> List[Dict[str, Any]]:
        """
        List configuration channels associated to an activation key.

        :param key: activation key name

        :return: List of configuration channels
        """
        return self.client("activationkey.listConfigChannels", key)

    def set_config_channels(self, keys: List[str], config_channel_label: List[str]) -> bool:
        """
        Replace the existing set of configuration channels on the given activation keys.
        Channels are ranked by their order in the array.

        :param keys: list of activation key names
        :param config_channel_label: list of configuration channel labels

        :return: boolean, True indicates success
        """
        return self._convert_bool_response(self.client("activationkey.setConfigChannels", keys, config_channel_label))


class UyuniChildMasterIntegration:
    """
    Integration with the Salt Master which is running
    on the same host as this current Minion.
    """
    DEFAULT_MASTER_CONFIG_PATH = "/etc/salt/master"

    def __init__(self):
        self._minions = CkMinions(salt.config.client_config(self._get_master_config()))

    @staticmethod
    def _get_master_config() -> str:
        """
        Resolve the Salt master configuration file path.

        Candidate paths come from the pillar key ``uyuni:masters:configs``;
        the first existing path wins, falling back to the default location.

        :return: path to salt master configuration file
        """
        default = UyuniChildMasterIntegration.DEFAULT_MASTER_CONFIG_PATH
        candidates = __pillar__.get("uyuni", {}).get("masters", {}).get("configs", [default])
        for candidate in candidates:
            if os.path.exists(candidate):
                return candidate
        return default

    def select_minions(self, target: str, target_type: str = "glob") -> Dict[str, Union[List[str], bool]]:
        """
        Select the minion IDs that match the given target expression.

        :param target: target expression to be applied
        :param target_type: target type, one of the following: glob, grain, grain_pcre, pillar, pillar_pcre,
                    pillar_exact, compound, compound_pillar_exact. Default: glob.

        :return: list of minions
        """
        return self._minions.check_minions(expr=target, tgt_type=target_type)


def __virtual__():
    """
    Expose this module to Salt under its virtual name.

    :return: the module's virtual name
    """
    return __virtualname__


# Users

def user_get_details(login, password=None, org_admin_user=None, org_admin_password=None):
    """
    Get details of an Uyuni user.
    If a password is provided, the user authenticates as themselves;
    otherwise the organization administrator credentials are used,
    and failing that, credentials from pillar.

    :param login: user id to look for
    :param password: password for the user
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: The user information
    """
    # Authenticate as the user themselves when a password was supplied.
    if password is None:
        auth_user, auth_password = org_admin_user, org_admin_password
    else:
        auth_user, auth_password = login, password
    return UyuniUser(auth_user, auth_password).get_details(login)


def user_list_users(org_admin_user=None, org_admin_password=None):
    """
    Return all Uyuni users visible to the authenticated user.

    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: all users visible to the authenticated user
    """
    user_api = UyuniUser(org_admin_user, org_admin_password)
    return user_api.list_users()


def user_create(login, password, email, first_name, last_name, use_pam_auth=False,
                org_admin_user=None, org_admin_password=None):
    """
    Create an Uyuni user.

    :param login: user id to look for
    :param password: password for the user
    :param email: user email address
    :param first_name: user first name
    :param last_name: user last name
    :param use_pam_auth: if you wish to use PAM authentication for this user
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    user_api = UyuniUser(org_admin_user, org_admin_password)
    return user_api.create(login=login, password=password, email=email,
                           first_name=first_name, last_name=last_name,
                           use_pam_auth=use_pam_auth)


def user_set_details(login, password, email, first_name=None, last_name=None,
                     org_admin_user=None, org_admin_password=None):
    """
    Update an Uyuni user.

    :param login: user id to look for
    :param password: password for the user
    :param email: user email address
    :param first_name: user first name
    :param last_name: user last name
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    user_api = UyuniUser(org_admin_user, org_admin_password)
    return user_api.set_details(login=login, password=password, email=email,
                                first_name=first_name, last_name=last_name)


def user_delete(login, org_admin_user=None, org_admin_password=None):
    """
    Delete an Uyuni user.

    :param login: user id to look for
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    user_api = UyuniUser(org_admin_user, org_admin_password)
    return user_api.delete(login)


def user_list_roles(login, password=None, org_admin_user=None, org_admin_password=None):
    """
    Return the roles assigned to an Uyuni user.
    If a password is provided, the user authenticates as themselves;
    otherwise the organization administrator credentials are used,
    and failing that, credentials from pillar.

    :param login: user id to look for
    :param password: password for the user
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: List of user roles assigned
    """
    # Authenticate as the user themselves when a password was supplied.
    if password is None:
        auth_user, auth_password = org_admin_user, org_admin_password
    else:
        auth_user, auth_password = login, password
    return UyuniUser(auth_user, auth_password).list_roles(login)


def user_add_role(login, role, org_admin_user=None, org_admin_password=None):
    """
    Add a role to an Uyuni user.

    :param login: user id to look for
    :param role: role to be added to the user
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    user_api = UyuniUser(org_admin_user, org_admin_password)
    return user_api.add_role(login=login, role=role)


def user_remove_role(login, role, org_admin_user=None, org_admin_password=None):
    """
    Remove a role from an Uyuni user.

    :param login: user id to look for
    :param role: role to be removed from the user
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    user_api = UyuniUser(org_admin_user, org_admin_password)
    return user_api.remove_role(login=login, role=role)


def user_list_assigned_system_groups(login, org_admin_user=None, org_admin_password=None):
    """
    Return the system groups that a user can administer.

    :param login: user id to look for
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: List of system groups that a user can administer
    """
    user_api = UyuniUser(org_admin_user, org_admin_password)
    return user_api.list_assigned_system_groups(login=login)


def user_add_assigned_system_groups(login, server_group_names, set_default=False,
                                    org_admin_user=None, org_admin_password=None):
    """
    Add system groups to a user's list of assigned system groups.

    :param login: user id to look for
    :param server_group_names: systems groups to add to list of assigned system groups
    :param set_default: Should system groups also be added to user's list of default system groups.
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    user_api = UyuniUser(org_admin_user, org_admin_password)
    return user_api.add_assigned_system_groups(login=login,
                                               server_group_names=server_group_names,
                                               set_default=set_default)


def user_remove_assigned_system_groups(login, server_group_names, set_default=False,
                                       org_admin_user=None, org_admin_password=None):
    """
    Remove system groups from a user's list of assigned system groups.

    :param login: user id to look for
    :param server_group_names: systems groups to remove from list of assigned system groups
    :param set_default: Should system groups also be removed from user's list of default system groups.
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    user_api = UyuniUser(org_admin_user, org_admin_password)
    return user_api.remove_assigned_system_groups(login=login,
                                                  server_group_names=server_group_names,
                                                  set_default=set_default)


# Software channels

def channel_list_manageable_channels(login, password):
    """
    List all channels that the authenticated user can manage.

    :param login: user login id
    :param password: user password

    :return: list of manageable channels for the user
    """
    channel_api = UyuniChannel(login, password)
    return channel_api.list_manageable_channels()


def channel_list_my_channels(login, password):
    """
    List all channels that the authenticated user is subscribed to.

    :param login: user login id
    :param password: user password

    :return: list of subscribed channels for the user
    """
    channel_api = UyuniChannel(login, password)
    return channel_api.list_my_channels()


def channel_software_set_user_manageable(channel_label, login, access,
                                         org_admin_user=None, org_admin_password=None):
    """
    Grant or revoke a user's permission to manage a channel.
    When ``access`` is True the user receives manage permission on the
    channel; when False that permission is revoked.

    :param channel_label: label of the channel
    :param login: user login id
    :param access: True to grant management access, False to revoke it
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    software_api = UyuniChannelSoftware(org_admin_user, org_admin_password)
    return software_api.set_user_manageable(channel_label, login, access)


def channel_software_set_user_subscribable(channel_label, login, access,
                                           org_admin_user=None, org_admin_password=None):
    """
    Grant or revoke a user's permission to subscribe to a channel.
    When ``access`` is True the user receives subscribe permission on the
    channel; when False that permission is revoked.

    :param channel_label: label of the channel
    :param login: user login id
    :param access: True to grant subscribe access, False to revoke it
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    software_api = UyuniChannelSoftware(org_admin_user, org_admin_password)
    return software_api.set_user_subscribable(channel_label, login, access)


def channel_software_is_user_manageable(channel_label, login, org_admin_user=None, org_admin_password=None):
    """
    Check whether the channel may be managed by the given user.

    :param channel_label: label of the channel
    :param login: user login id
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean which indicates if user can manage channel or not
    """
    software_api = UyuniChannelSoftware(org_admin_user, org_admin_password)
    return software_api.is_user_manageable(channel_label, login)


def channel_software_is_user_subscribable(channel_label, login, org_admin_user=None, org_admin_password=None):
    """
    Check whether the channel may be subscribed to by the given user.

    :param channel_label: label of the channel
    :param login: user login id
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean which indicates if user can subscribe to the channel or not
    """
    software_api = UyuniChannelSoftware(org_admin_user, org_admin_password)
    return software_api.is_user_subscribable(channel_label, login)


def channel_software_is_globally_subscribable(channel_label, org_admin_user=None, org_admin_password=None):
    """
    Check whether the channel is globally subscribable within the organization.

    :param channel_label: label of the channel
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean which indicates if channel is globally subscribable
    """
    software_api = UyuniChannelSoftware(org_admin_user, org_admin_password)
    return software_api.is_globally_subscribable(channel_label)


def org_list_orgs(admin_user=None, admin_password=None):
    """
    List all organizations.
    Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

    :param admin_user: uyuni admin user
    :param admin_password: uyuni admin password

    :return: list of all available organizations.
    """
    org_api = UyuniOrg(admin_user, admin_password)
    return org_api.list_orgs()


def org_get_details(name, admin_user=None, admin_password=None):
    """
    Get details of an organization.
    Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

    :param name: organisation name
    :param admin_user: uyuni admin user
    :param admin_password: uyuni admin password

    :return: organization details
    """
    org_api = UyuniOrg(admin_user, admin_password)
    return org_api.get_details(name)


def org_delete(name, admin_user=None, admin_password=None):
    """
    Delete an organization.
    Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

    :param name: organization name
    :param admin_user: uyuni admin user
    :param admin_password: uyuni admin password

    :return: boolean, True indicates success
    """
    org_api = UyuniOrg(admin_user, admin_password)
    return org_api.delete(name)


def org_create(name, org_admin_user, org_admin_password, first_name, last_name, email,
               admin_prefix="Mr.", pam=False, admin_user=None, admin_password=None):
    """
    Create an Uyuni organization.
    Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

    :param name: organization name
    :param org_admin_user: organization admin user
    :param org_admin_password: organization admin password
    :param first_name: organization admin first name
    :param last_name: organization admin last name
    :param email: organization admin email
    :param admin_prefix: organization admin prefix
    :param pam: organization admin PAM authentication
    :param admin_user: uyuni admin user
    :param admin_password: uyuni admin password

    :return: dictionary with org information
    """
    org_api = UyuniOrg(admin_user, admin_password)
    return org_api.create(name=name, org_admin_user=org_admin_user,
                          org_admin_password=org_admin_password,
                          first_name=first_name, last_name=last_name, email=email,
                          admin_prefix=admin_prefix, pam=pam)


def org_update_name(org_id, name, admin_user=None, admin_password=None):
    """
    Rename an existing Uyuni organization.
    Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

    :param org_id: internal ID of the organization
    :param name: new name for the organization
    :param admin_user: Uyuni administrator login
    :param admin_password: Uyuni administrator password

    :return: organization details
    """
    org_api = UyuniOrg(admin_user, admin_password)
    return org_api.update_name(org_id, name)


def org_trust_list_orgs(org_admin_user=None, org_admin_password=None):
    """
    List the organizations trusted by the organization of the authenticated user.

    :param org_admin_user: organization administrator login
    :param org_admin_password: organization administrator password

    :return: List of organization details
    """
    trust_api = UyuniOrgTrust(org_admin_user, org_admin_password)
    return trust_api.list_orgs()


def org_trust_list_trusts(org_name, admin_user=None, admin_password=None):
    """
    List the trust status of every organization with respect to the given one.
    Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

    :param org_name: name of the organization whose trusts are listed
    :param admin_user: Uyuni administrator login
    :param admin_password: Uyuni administrator password

    :return: list with all organizations and their trust status
    """
    trust_api = UyuniOrgTrust(admin_user, admin_password)
    return trust_api.list_trusts(org_name)


def org_trust_add_trust_by_name(org_name, org_trust, admin_user=None, admin_password=None):
    """
    Add an organization, identified by name, to the trusted list of another organization.
    Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

    :param org_name: name of the organization gaining the trust
    :param org_trust: name of the organization to be trusted
    :param admin_user: Uyuni administrator login
    :param admin_password: Uyuni administrator password

    :return: boolean, True indicates success
    """
    trust_api = UyuniOrgTrust(admin_user, admin_password)
    return trust_api.add_trust_by_name(org_name, org_trust)


def org_trust_add_trust(org_id, org_trust_id, admin_user=None, admin_password=None):
    """
    Add an organization, identified by ID, to the trusted list of another organization.
    Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

    :param org_id: ID of the organization gaining the trust
    :param org_trust_id: ID of the organization to be trusted
    :param admin_user: Uyuni administrator login
    :param admin_password: Uyuni administrator password

    :return: boolean, True indicates success
    """
    trust_api = UyuniOrgTrust(admin_user, admin_password)
    return trust_api.add_trust(org_id, org_trust_id)


def org_trust_remove_trust_by_name(org_name, org_untrust, admin_user=None, admin_password=None):
    """
    Remove an organization, identified by name, from the trusted list of another organization.
    Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

    :param org_name: name of the organization losing the trust
    :param org_untrust: name of the organization to stop trusting
    :param admin_user: Uyuni administrator login
    :param admin_password: Uyuni administrator password

    :return: boolean, True indicates success
    """
    trust_api = UyuniOrgTrust(admin_user, admin_password)
    return trust_api.remove_trust_by_name(org_name, org_untrust)


def org_trust_remove_trust(org_id, org_untrust_id, admin_user=None, admin_password=None):
    """
    Remove an organization, identified by ID, from the trusted list of another organization.
    Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

    :param org_id: ID of the organization losing the trust
    :param org_untrust_id: ID of the organization to stop trusting
    :param admin_user: Uyuni administrator login
    :param admin_password: Uyuni administrator password

    :return: boolean, True indicates success
    """
    trust_api = UyuniOrgTrust(admin_user, admin_password)
    return trust_api.remove_trust(org_id, org_untrust_id)


# System Groups

def systemgroup_create(name, descr, org_admin_user=None, org_admin_password=None):
    """
    Create a new system group.

    :param name: name of the system group
    :param descr: description of the system group
    :param org_admin_user: organization administrator login
    :param org_admin_password: organization administrator password

    :return: details of the system group
    """
    group_api = UyuniSystemgroup(org_admin_user, org_admin_password)
    return group_api.create(name=name, description=descr)


def systemgroup_list_all_groups(username, password):
    """
    Retrieve every system group visible to the given user.

    :param username: login to authenticate with
    :param password: password to authenticate with

    :return: list of system group details accessible by the user
    """
    group_api = UyuniSystemgroup(username, password)
    return group_api.list_all_groups()


def systemgroup_get_details(name, org_admin_user=None, org_admin_password=None):
    """
    Look up the details of a system group by name.

    :param name: name of the system group
    :param org_admin_user: organization administrator login
    :param org_admin_password: organization administrator password

    :return: details of the system group
    """
    group_api = UyuniSystemgroup(org_admin_user, org_admin_password)
    return group_api.get_details(name=name)


def systemgroup_update(name, descr, org_admin_user=None, org_admin_password=None):
    """
    Change the description of an existing system group.

    :param name: name of the system group
    :param descr: new description of the system group
    :param org_admin_user: organization administrator login
    :param org_admin_password: organization administrator password

    :return: details of the system group
    """
    group_api = UyuniSystemgroup(org_admin_user, org_admin_password)
    return group_api.update(name=name, description=descr)


def systemgroup_delete(name, org_admin_user=None, org_admin_password=None):
    """
    Remove a system group.

    :param name: name of the system group to delete
    :param org_admin_user: organization administrator login
    :param org_admin_password: organization administrator password

    :return: boolean, True indicates success
    """
    group_api = UyuniSystemgroup(org_admin_user, org_admin_password)
    return group_api.delete(name=name)


def systemgroup_list_systems(name, minimal=True, org_admin_user=None, org_admin_password=None):
    """
    List the systems that belong to a system group.

    :param name: name of the system group
    :param minimal: when True (default) return only minimal system information;
                    set to False for full details
    :param org_admin_user: organization administrator login
    :param org_admin_password: organization administrator password

    :return: List of system information
    """
    group_api = UyuniSystemgroup(org_admin_user, org_admin_password)
    return group_api.list_systems(name=name, minimal=minimal)


def systemgroup_add_remove_systems(name, add_remove, system_ids=None,
                                   org_admin_user=None, org_admin_password=None):
    """
    Add systems to, or remove systems from, a system group.

    :param name: name of the system group
    :param add_remove: True to add the systems to the group, False to remove them
    :param system_ids: list of system IDs to add/remove (defaults to an empty list)
    :param org_admin_user: organization administrator login
    :param org_admin_password: organization administrator password

    :return: boolean, True indicates success
    """
    # Use None as the default instead of a mutable [] literal: a list default
    # is a single shared object across all calls (classic Python pitfall).
    return UyuniSystemgroup(org_admin_user, org_admin_password).add_remove_systems(
        name=name, add_remove=add_remove,
        system_ids=system_ids if system_ids is not None else [])


def master_select_minions(target=None, target_type="glob"):
    """
    Ask the Salt Master configured on this host for the minions matching
    the given target expression.

    :param target: expression used to filter the minions
    :param target_type: type of the target expression; one of: glob, grain, grain_pcre,
                pillar, pillar_pcre, pillar_exact, compound, compound_pillar_exact.
                Default: glob.

    :return: list of minion IDs
    """
    integration = UyuniChildMasterIntegration()
    return integration.select_minions(target=target, target_type=target_type)


def systems_get_minion_id_map(username=None, password=None, refresh=False):
    """
    Build a mapping from minion ID to Uyuni system ID for every system the
    authenticated user can access.

    :param username: login to authenticate with
    :param password: password to authenticate with
    :param refresh: when True, bypass the local context cache and fetch fresh data

    :return: Map between minion ID and system ID of all system accessible by authenticated user
    """
    systems_api = UyuniSystems(username, password)
    return systems_api.get_minion_id_map(refresh)


# Activation Keys

def activation_key_get_details(id, org_admin_user=None, org_admin_password=None):
    """
    Fetch the details of an Uyuni Activation Key.

    :param id: the Activation Key ID (parameter name shadows the 'id' builtin,
               kept for backward compatibility with keyword callers)
    :param org_admin_user: organization administrator login
    :param org_admin_password: organization administrator password

    :return: Activation Key information
    """
    ak_api = UyuniActivationKey(org_admin_user, org_admin_password)
    return ak_api.get_details(id)


def activation_key_delete(id, org_admin_user=None, org_admin_password=None):
    """
    Remove an Uyuni Activation Key.

    :param id: the Activation Key ID (parameter name shadows the 'id' builtin,
               kept for backward compatibility with keyword callers)
    :param org_admin_user: organization administrator login
    :param org_admin_password: organization administrator password

    :return: boolean, True indicates success
    """
    ak_api = UyuniActivationKey(org_admin_user, org_admin_password)
    return ak_api.delete(id)


def activation_key_create(key, description,
                          base_channel_label='',
                          usage_limit=0,
                          system_types=None, universal_default=False,
                          org_admin_user=None, org_admin_password=None):
    """
    Creates an Uyuni Activation Key

    :param key: activation key name
    :param description: activation key description
    :param base_channel_label: base channel to be used
    :param usage_limit: activation key usage limit. Default value is 0, which means unlimited usage
    :param system_types: system types to be assigned.
                         Can be one of: 'virtualization_host', 'container_build_host',
                         'monitoring_entitled', 'osimage_build_host'
    :param universal_default: sets this activation key as organization universal default
    :param org_admin_user: organization admin username
    :param org_admin_password: organization admin password

    :return: boolean, True indicates success
    """
    # 'system_types' defaults to None instead of a shared mutable [] literal
    # (classic Python pitfall); it is normalized to a fresh list here, so the
    # behavior for callers that omit the argument is unchanged.
    return UyuniActivationKey(org_admin_user, org_admin_password).create(
        key,
        description,
        base_channel_label,
        usage_limit,
        system_types if system_types is not None else [],
        universal_default)


def activation_key_set_details(key,
                               description=None,
                               contact_method=None,
                               base_channel_label=None,
                               usage_limit=None,
                               universal_default=False,
                               org_admin_user=None, org_admin_password=None):
    """
    Update properties of an existing Uyuni Activation Key.

    :param key: name of the activation key
    :param description: new description
    :param contact_method: contact method to use; one of 'default', 'ssh-push' or 'ssh-push-tunnel'
    :param base_channel_label: label of the base channel to assign
    :param usage_limit: usage limit for the key (0 means unlimited usage)
    :param universal_default: whether this key becomes the organization universal default
    :param org_admin_user: organization administrator login
    :param org_admin_password: organization administrator password

    :return: boolean, True indicates success
    """
    ak_api = UyuniActivationKey(org_admin_user, org_admin_password)
    return ak_api.set_details(key,
                              description=description,
                              contact_method=contact_method,
                              base_channel_label=base_channel_label,
                              usage_limit=usage_limit,
                              universal_default=universal_default)


def activation_key_add_entitlements(key, system_types, org_admin_user=None, org_admin_password=None):
    """
    Attach a list of entitlements to an activation key.

    :param key: name of the activation key
    :param system_types: list of system types (entitlements) to add
    :param org_admin_user: organization administrator login
    :param org_admin_password: organization administrator password

    :return: boolean, True indicates success
    """
    ak_api = UyuniActivationKey(org_admin_user, org_admin_password)
    return ak_api.add_entitlements(key, system_types)


def activation_key_remove_entitlements(key, system_types, org_admin_user=None, org_admin_password=None):
    """
    Detach a list of entitlements from an activation key.

    :param key: name of the activation key
    :param system_types: list of system types (entitlements) to remove
    :param org_admin_user: organization administrator login
    :param org_admin_password: organization administrator password

    :return: boolean, True indicates success
    """
    ak_api = UyuniActivationKey(org_admin_user, org_admin_password)
    return ak_api.remove_entitlements(key, system_types)


def activation_key_add_child_channels(key, child_channels, org_admin_user=None, org_admin_password=None):
    """
    Attach child channels to an activation key.

    :param key: name of the activation key
    :param child_channels: list of child channel labels to add
    :param org_admin_user: organization administrator login
    :param org_admin_password: organization administrator password

    :return: boolean, True indicates success
    """
    ak_api = UyuniActivationKey(org_admin_user, org_admin_password)
    return ak_api.add_child_channels(key, child_channels)


def activation_key_remove_child_channels(key, child_channels, org_admin_user=None, org_admin_password=None):
    """
    Detach child channels from an activation key.

    :param key: name of the activation key
    :param child_channels: list of child channel labels to remove
    :param org_admin_user: organization administrator login
    :param org_admin_password: organization administrator password

    :return: boolean, True indicates success
    """
    ak_api = UyuniActivationKey(org_admin_user, org_admin_password)
    return ak_api.remove_child_channels(key, child_channels)


def activation_key_check_config_deployment(key, org_admin_user=None, org_admin_password=None):
    """
    Report the state of the 'configure_after_registration' flag of an Activation Key.

    :param key: name of the activation key
    :param org_admin_user: organization administrator login
    :param org_admin_password: organization administrator password

    :return: boolean, true if enabled, false if disabled
    """
    ak_api = UyuniActivationKey(org_admin_user, org_admin_password)
    return ak_api.check_config_deployment(key)


def activation_key_enable_config_deployment(key, org_admin_user=None, org_admin_password=None):
    """
    Turn on the 'configure_after_registration' flag of an Activation Key.

    :param key: name of the activation key
    :param org_admin_user: organization administrator login
    :param org_admin_password: organization administrator password

    :return: boolean, True indicates success
    """
    ak_api = UyuniActivationKey(org_admin_user, org_admin_password)
    return ak_api.enable_config_deployment(key)


def activation_key_disable_config_deployment(key, org_admin_user=None, org_admin_password=None):
    """
    Turn off the 'configure_after_registration' flag of an Activation Key.

    :param key: name of the activation key
    :param org_admin_user: organization administrator login
    :param org_admin_password: organization administrator password

    :return: boolean, True indicates success
    """
    ak_api = UyuniActivationKey(org_admin_user, org_admin_password)
    return ak_api.disable_config_deployment(key)


def activation_key_add_packages(key, packages, org_admin_user=None, org_admin_password=None):
    """
    Attach a list of packages to an activation key.

    :param key: name of the activation key
    :param packages: list of packages to add
    :param org_admin_user: organization administrator login
    :param org_admin_password: organization administrator password

    :return: boolean, True indicates success
    """
    ak_api = UyuniActivationKey(org_admin_user, org_admin_password)
    return ak_api.add_packages(key, packages)


def activation_key_remove_packages(key, packages, org_admin_user=None, org_admin_password=None):
    """
    Detach a list of packages from an activation key.

    :param key: name of the activation key
    :param packages: list of packages to remove
    :param org_admin_user: organization administrator login
    :param org_admin_password: organization administrator password

    :return: boolean, True indicates success
    """
    ak_api = UyuniActivationKey(org_admin_user, org_admin_password)
    return ak_api.remove_packages(key, packages)


def activation_key_add_server_groups(key, server_groups, org_admin_user=None, org_admin_password=None):
    """
    Attach a list of server groups to an activation key.

    :param key: name of the activation key
    :param server_groups: list of server groups to add
    :param org_admin_user: organization administrator login
    :param org_admin_password: organization administrator password

    :return: boolean, True indicates success
    """
    ak_api = UyuniActivationKey(org_admin_user, org_admin_password)
    return ak_api.add_server_groups(key, server_groups)


def activation_key_remove_server_groups(key, server_groups, org_admin_user=None, org_admin_password=None):
    """
    Detach a list of server groups from an activation key.

    :param key: name of the activation key
    :param server_groups: list of server groups to remove
    :param org_admin_user: organization administrator login
    :param org_admin_password: organization administrator password

    :return: boolean, True indicates success
    """
    ak_api = UyuniActivationKey(org_admin_user, org_admin_password)
    return ak_api.remove_server_groups(key, server_groups)


def activation_key_list_config_channels(key, org_admin_user=None, org_admin_password=None):
    """
    List the configuration channels associated with an activation key.

    :param key: name of the activation key
    :param org_admin_user: organization administrator login
    :param org_admin_password: organization administrator password

    :return: List of configuration channels
    """
    ak_api = UyuniActivationKey(org_admin_user, org_admin_password)
    return ak_api.list_config_channels(key)


def activation_key_set_config_channels(keys, config_channel_label,
                                       org_admin_user=None, org_admin_password=None):
    """
    Replace the configuration channels assigned to the given activation keys.
    Channels are ranked by their position in the list.

    :param keys: list of activation key names
    :param config_channel_label: list of configuration channel labels
    :param org_admin_user: organization administrator login
    :param org_admin_password: organization administrator password

    :return: boolean, True indicates success
    """
    ak_api = UyuniActivationKey(org_admin_user, org_admin_password)
    return ak_api.set_config_channels(keys, config_channel_label)
  070701000000CF000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001B00000000susemanager-sls/src/states    070701000000D0000081B400000000000000000000000160C1E96E00000000000000000000000000000000000000000000002700000000susemanager-sls/src/states/__init__.py    070701000000D1000081B400000000000000000000000160C1E96E00000BDB000000000000000000000000000000000000002800000000susemanager-sls/src/states/mgrcompat.py   # -*- coding: utf-8 -*-
'''
SUSE Manager custom wrapper for Salt "module.run" state module.

This wrapper determines the syntax to use for calling the Salt "module.run" state
that has changed between different Salt version.

Using this wrapper we ensure all SUSE Manager SLS files are using the same syntax
regardless the actual Salt version installed on the minion.

'''
from __future__ import absolute_import

# Import salt libs
from salt.utils.odict import OrderedDict
from salt.states import module

import logging

log = logging.getLogger(__name__)

__virtualname__ = 'mgrcompat'


def __virtual__():
    '''
    Always load this wrapper as long as 'module.run' is importable.

    The wrapped salt.states.module is imported directly rather than through
    the loader, so it does not receive the injected dunder globals; wire
    them in manually before exposing the virtual module.
    '''
    module.__opts__ = __opts__
    module.__salt__ = __salt__
    module.__grains__ = __grains__
    module.__pillar__ = __pillar__
    module.__context__ = __context__
    module.__utils__ = __utils__
    return __virtualname__

def _tailor_kwargs_to_new_syntax(name, **kwargs):
    nkwargs = {}
    _opt_kwargs = None
    for k, v in kwargs.items():
        if k.startswith("m_"):
            nkwargs[k[2:]] = v
        elif k == 'kwargs':
            _opt_kwargs = kwargs[k]
        else:
            nkwargs[k] = v
    ret = {name: [OrderedDict(nkwargs)]}
    if _opt_kwargs:
        ret[name].append(OrderedDict(_opt_kwargs))
    return ret

def module_run(**kwargs):
    '''
    Execute the Salt "module.run" state, passing the arguments with the
    syntax supported by the Salt version and configuration of the minion.

    The "new" single-dict syntax is opt-in via the 'use_superseded' option
    on 2018.3 through 3002.x and is the only syntax after 3002.x, while
    2016.11 and older support only the old flat-kwargs syntax.

    :param kwargs: old-syntax "module.run" parameters; must contain 'name'
                   (the execution module function to run)

    :return: the state return dict of the wrapped "module.run" call,
             normalized to look like an old-syntax result
    '''

    # The new syntax will be used as the default
    use_new_syntax = True

    if __grains__['saltversioninfo'][0] > 3002:
        # Only new syntax - default behavior for Phosphorus and future releases
        pass
    elif __grains__['saltversioninfo'][0] > 2016 and 'module.run' in __opts__.get('use_superseded', []):
        # New syntax - explicitely enabled via 'use_superseded' configuration on 2018.3, 2019.2, 3000.x and 3002.x
        pass
    elif __grains__['saltversioninfo'][0] > 2016 and not 'module.run' in __opts__.get('use_superseded', []):
        # Old syntax - default behavior for 2018.3, 2019.2, 3000.x and 3002.x
        use_new_syntax = False
    elif __grains__['saltversioninfo'][0] <= 2016:
        # Only old syntax - the new syntax is not available for 2016.11 and 2015.8
        use_new_syntax = False

    if use_new_syntax:
        log.debug("Minion is using the new syntax for 'module.run' state. Tailoring parameters.")
        log.debug("Old parameters: {}".format(kwargs))
        # In the old syntax 'name' carries the execution function to call;
        # in the new syntax it becomes the single top-level dict key.
        old_name = kwargs.pop('name')
        new_kwargs = _tailor_kwargs_to_new_syntax(old_name, **kwargs)
        log.debug("New parameters for 'module.run' state: {}".format(new_kwargs))
    else:
        new_kwargs = kwargs

    ret = module.run(**new_kwargs)
    if use_new_syntax:
        # Flatten the new-syntax result back to the old layout so SLS files
        # written for the old syntax keep working: move the per-function
        # changes under the conventional 'ret' key and restore the name.
        # NOTE(review): assumes a non-empty 'changes' dict is keyed by
        # old_name - confirm against salt.states.module.run for the targeted
        # Salt versions, otherwise this pop() would raise KeyError.
        if ret['changes']:
            changes = ret['changes'].pop(old_name)
            ret['changes']['ret'] = changes
        ret['name'] = old_name
    return ret
 070701000000D2000081B400000000000000000000000160C1E96E00000BBC000000000000000000000000000000000000002600000000susemanager-sls/src/states/product.py '''
Handles installation of SUSE products using zypper

Only supported with :mod:`zypper <salt.modules.zypper>`
'''

import logging

from salt.utils.versions import version_cmp
from salt.exceptions import CommandExecutionError

log = logging.getLogger(__name__)

__virtualname__ = 'product'

def __virtual__():
    '''
    Load only on SUSE platforms where a sufficiently new zypper is installed.
    '''
    if __grains__.get('os_family', '') != 'Suse':
        return (False, "Module product: non SUSE OS not supported")

    # Not every SUSE-family system ships zypper; probe the installed package.
    try:
        zypper_pkg = __salt__['pkg.info_installed']('zypper')['zypper']
    except CommandExecutionError:
        return (False, "Module product: zypper package manager not found")

    # 'zypper search --provides' needs at least zypper 1.8.13
    if version_cmp(zypper_pkg['version'], '1.8.13') < 0:
        return (False, "Module product: zypper 1.8.13 or greater required")

    return __virtualname__


def _get_missing_products(refresh):
    '''
    Determine which subscribed products still need to be installed.

    Searches for all not-yet-installed product() providers, then drops any
    product that is already provided by another installed package, to avoid
    pulling in conflicting providers.

    :param refresh: force a metadata refresh when True

    :return: list of product package names to install, or None when the
             initial search yields no results
    '''
    try:
        candidates = list(__salt__['pkg.search'](
            'product()',
            refresh=refresh,
            match='exact',
            provides=True,
            not_installed_only=True
        ))
        log.debug("The following products are not yet installed: %s", ', '.join(candidates))
    except CommandExecutionError:
        # No search results
        return None

    to_install = []
    for product_pkg in candidates:
        try:
            providers = list(__salt__['pkg.search'](
                product_pkg,
                match='exact',
                provides=True
            ))
        except CommandExecutionError:
            # No search results: nothing else provides it, so install it
            to_install.append(product_pkg)
            continue

        # Ignore the product package itself in the provider list
        # (remove only the first occurrence, matching list.remove semantics)
        if product_pkg in providers:
            providers.remove(product_pkg)
        if providers:
            log.debug("The product '%s' is already provided by '%s'. Skipping.", product_pkg, ', '.join(providers))
        else:
            # No other providers than the package itself
            to_install.append(product_pkg)

    return to_install

def all_installed(name, refresh=False, **kwargs):
    '''
    Ensure every subscribed product is installed.

    refresh
        force a refresh if set to True.
        If set to False (default) it is up to zypper whether a refresh
        is executed.
    '''

    missing = _get_missing_products(refresh)

    if missing:
        # Delegate the actual installation to the pkg.installed state
        return __states__['pkg.installed'](name, pkgs=missing)

    log.debug("All products are already installed. Nothing to do.")
    return {
        'name': name,
        'changes': {},
        'result': True,
        'comment': "All subscribed products are already installed",
    }
070701000000D3000081B400000000000000000000000160C1E96E0000F24C000000000000000000000000000000000000002B00000000susemanager-sls/src/states/uyuni_config.py    import logging
from typing import Optional, Dict, Any, List, Tuple
from collections import Counter

SERVER_GROUP_NOT_FOUND_ERROR = 2201
NO_SUCH_USER_ERROR = -213
ORG_NOT_FOUND_ERROR = 2850
ACTIVATION_KEY_NOT_FOUND_ERROR = -212
AUTHENTICATION_ERROR = 2950

log = logging.getLogger(__name__)

__salt__: Dict[str, Any] = {}
__opts__: Dict[str, Any] = {}
__virtualname__ = 'uyuni'


class StateResult:
    """Helpers for building Salt state return dictionaries."""

    @staticmethod
    def state_error(name: str, comment: Optional[str] = None):
        """
        Build a failed state result (result=False, no changes).

        :param name: state name
        :param comment: human-readable failure description

        :return: state result dict
        """
        return StateResult.prepare_result(name, False, comment)

    @staticmethod
    def prepare_result(name: str, result: Optional[bool],
                       comment: Optional[str] = None, changes: Optional[Dict] = None):
        """
        Build a Salt state result dictionary.

        :param name: state name
        :param result: True on success, False on failure, None for unchanged/test
        :param comment: human-readable description of the outcome
        :param changes: changes performed by the state (defaults to a fresh empty dict)

        :return: dict with 'name', 'changes', 'result' and 'comment' keys
        """
        # Default 'changes' to None and substitute a fresh dict per call:
        # the previous `changes: Dict = {}` default was one shared mutable
        # object, so mutating one result's 'changes' leaked into every later
        # result built without an explicit 'changes' argument.
        return {
            'name': name,
            'changes': changes if changes is not None else {},
            'result': result,
            'comment': comment,
        }


class UyuniUsers:
    """
    Salt states to manage Uyuni users (create/update/delete) through the
    ``uyuni.*`` execution module functions exposed in ``__salt__``.
    """

    @staticmethod
    def _update_user_roles(name: str,
                           current_roles: List[str] = [],
                           new_roles: List[str] = [],
                           org_admin_user: str = None,
                           org_admin_password: str = None):
        """
        Align the user's roles with ``new_roles``: remove roles that are no
        longer wanted, then add roles that are not yet assigned.

        :param name: user login ID
        :param current_roles: roles currently assigned to the user
        :param new_roles: desired set of roles
        :param org_admin_user: organization administrator username
        :param org_admin_password: organization administrator password
        """

        for role_to_remove in (current_roles or []):
            if role_to_remove not in (new_roles or []):
                __salt__['uyuni.user_remove_role'](name, role=role_to_remove,
                                                   org_admin_user=org_admin_user,
                                                   org_admin_password=org_admin_password)

        for role_to_add in (new_roles or []):
            if role_to_add not in (current_roles or []):
                __salt__['uyuni.user_add_role'](name, role=role_to_add,
                                                org_admin_user=org_admin_user,
                                                org_admin_password=org_admin_password)

    @staticmethod
    def _update_user_system_groups(name: str,
                                   current_system_groups: List[str] = [],
                                   system_groups: List[str] = [],
                                   org_admin_user: str = None,
                                   org_admin_password: str = None):
        """
        Align the user's assigned system groups with ``system_groups``:
        add missing assignments, then remove obsolete ones.

        :param name: user login ID
        :param current_system_groups: group names currently assigned
        :param system_groups: desired group names
        :param org_admin_user: organization administrator username
        :param org_admin_password: organization administrator password
        """

        systems_groups_add = [sys for sys in (system_groups or []) if sys not in (current_system_groups or [])]
        if systems_groups_add:
            __salt__['uyuni.user_add_assigned_system_groups'](login=name, server_group_names=systems_groups_add,
                                                              org_admin_user=org_admin_user,
                                                              org_admin_password=org_admin_password)

        system_groups_remove = [sys for sys in (current_system_groups or []) if sys not in (system_groups or [])]
        if system_groups_remove:
            __salt__['uyuni.user_remove_assigned_system_groups'](login=name, server_group_names=system_groups_remove,
                                                                 org_admin_user=org_admin_user,
                                                                 org_admin_password=org_admin_password)

    @staticmethod
    def _compute_changes(user_changes: Dict[str, Any],
                         current_user: Dict[str, Any],
                         roles: List[str],
                         current_roles: List[str],
                         system_groups: List[str],
                         current_system_groups: List[str],
                         use_pam_auth: bool = False):
        """
        Compute the Salt ``changes`` dictionary between the desired user
        state and the current one.

        :param user_changes: desired user attributes (login, password, ...)
        :param current_user: current user attributes, or None if the user
               does not exist yet
        :param roles: desired roles
        :param current_roles: currently assigned roles
        :param system_groups: desired system group names
        :param current_system_groups: currently assigned system group names
        :param use_pam_auth: True when the user authenticates via PAM;
               the password-change probe is skipped in that case
        :return: tuple ``(changes, error)`` where ``error`` is an exception
                 or None
        """
        changes = {}
        error = None
        # user field changes
        for field in ["email", "first_name", "last_name"]:
            if (current_user or {}).get(field) != user_changes.get(field):
                changes[field] = {"new": user_changes[field]}
                if current_user:
                    changes[field]["old"] = (current_user or {}).get(field)

        # role changes (Counter compares ignoring order but counting duplicates)
        if Counter(roles or []) != Counter(current_roles or []):
            changes['roles'] = {'new': roles}
            if current_roles:
                changes['roles']['old'] = current_roles

        # system group changes
        if Counter(system_groups or []) != Counter(current_system_groups or []):
            changes['system_groups'] = {'new': system_groups}
            if current_system_groups:
                changes['system_groups']['old'] = current_system_groups

        # check if password have changed: probe by authenticating as the
        # user with the desired password
        if current_user and not use_pam_auth:
            try:
                __salt__['uyuni.user_get_details'](user_changes.get('login'),
                                                   user_changes.get('password'))
            except Exception as exc:
                # check if it's an authentication error. If yes, password have changed
                # NOTE(review): assumes the exception carries a ``faultCode``
                # attribute (XML-RPC Fault); other exception types would
                # raise AttributeError here — confirm.
                if exc.faultCode == AUTHENTICATION_ERROR:
                    changes["password"] = {"new": "(hidden)", "old": "(hidden)"}
                else:
                    error = exc
        return changes, error

    def manage(self, login: str, password: str, email: str, first_name: str, last_name: str, use_pam_auth: bool = False,
               roles: Optional[List[str]] = [], system_groups: Optional[List[str]] = [],
               org_admin_user: str = None, org_admin_password: str = None) -> Dict[str, Any]:
        """
        Ensure a user is present with all specified properties

        :param login: user login ID
        :param password: desired password for the user
        :param email: valid email address
        :param first_name: First name
        :param last_name: Last name
        :param use_pam_auth: if you wish to use PAM authentication for this user
        :param roles: roles to assign to user
        :param system_groups: system groups to assign user to
        :param org_admin_user: organization administrator username
        :param org_admin_password: organization administrator password
        :return: dict for Salt communication
        """
        current_user = None
        current_roles = None
        current_system_groups_names = None
        try:
            current_user = __salt__['uyuni.user_get_details'](login, org_admin_user=org_admin_user,
                                                              org_admin_password=org_admin_password)
            current_roles = __salt__['uyuni.user_list_roles'](login, org_admin_user=org_admin_user,
                                                              org_admin_password=org_admin_password)
            current_system_groups = __salt__['uyuni.user_list_assigned_system_groups'](login,
                                                                                       org_admin_user=org_admin_user,
                                                                                       org_admin_password=org_admin_password)
            current_system_groups_names = [s["name"] for s in (current_system_groups or [])]
        except Exception as exc:
            # Admin credential problems are fatal. Any other fault (e.g. the
            # user does not exist yet) leaves current_* as None and the user
            # gets created below.
            if exc.faultCode == AUTHENTICATION_ERROR:
                error_message = "Error while retrieving user information (admin credentials error) '{}': {}".format(
                    login, exc)
                log.warning(error_message)
                return StateResult.state_error(login, comment=error_message)

        user_paramters = {"login": login, "password": password, "email": email,
                          "first_name": first_name, "last_name": last_name,
                          "org_admin_user": org_admin_user, "org_admin_password": org_admin_password}

        changes, error = self._compute_changes(user_paramters, current_user,
                                               roles, current_roles,
                                               system_groups, current_system_groups_names,
                                               use_pam_auth=use_pam_auth)

        if error:
            return StateResult.state_error(login, "Error computing changes for user '{}': {}".format(login, error))
        if not changes:
            return StateResult.prepare_result(login, True, "{0} is already in the desired state".format(login))
        if not current_user:
            # New user: report login/password as new values (password hidden)
            changes['login'] = {"new": login}
            changes['password'] = {"new": "(hidden)"}
        if __opts__['test']:
            # Test mode: report planned changes without applying anything
            return StateResult.prepare_result(login, None, "{0} would be modified".format(login), changes)

        try:
            if current_user:
                __salt__['uyuni.user_set_details'](**user_paramters)
            else:
                user_paramters["use_pam_auth"] = use_pam_auth
                __salt__['uyuni.user_create'](**user_paramters)

            self._update_user_roles(login, current_roles, roles,
                                    org_admin_user, org_admin_password)
            self._update_user_system_groups(login, current_system_groups_names, system_groups,
                                            org_admin_user, org_admin_password)
        except Exception as exc:
            return StateResult.state_error(login, "Error modifying user '{}': {}".format(login, exc))
        else:
            return StateResult.prepare_result(login, True, "{0} user successfully modified".format(login), changes)

    def delete(self, login: str, org_admin_user: str = None, org_admin_password: str = None) -> Dict[str, Any]:
        """
        Remove an Uyuni user

        :param org_admin_user: organization administrator username
        :param org_admin_password: organization administrator password
        :param login: login of the user

        :return: dict for Salt communication
        """
        try:
            user = __salt__['uyuni.user_get_details'](login, org_admin_user=org_admin_user,
                                                      org_admin_password=org_admin_password)
        except Exception as exc:
            if exc.faultCode == NO_SUCH_USER_ERROR:
                return StateResult.prepare_result(login, True, "{0} is already absent".format(login))
            if exc.faultCode == AUTHENTICATION_ERROR:
                return StateResult.state_error(login,
                                               "Error deleting user (organization credentials error) '{}': {}".format(
                                                   login, exc))
            # Unexpected fault: let the state system surface it
            raise exc
        else:
            # Record the attributes being removed before deleting
            changes = {
                'login': {'old': login},
                'email': {'old': user.get('email')},
                'first_name': {'old': user.get('first_name')},
                'last_name': {'old': user.get('last_name')}
            }
            if __opts__['test']:
                return StateResult.prepare_result(login, None, "{0} would be deleted".format(login), changes)

            try:
                __salt__['uyuni.user_delete'](login,
                                              org_admin_user=org_admin_user,
                                              org_admin_password=org_admin_password)
                return StateResult.prepare_result(login, True, "User {} has been deleted".format(login), changes)
            except Exception as exc:
                return StateResult.state_error(login, "Error deleting user '{}': {}".format(login, exc))


class UyuniUserChannels:
    """
    Salt states to manage which channels an Uyuni user can manage and
    subscribe to.
    """

    @staticmethod
    def process_changes(current_managed_channels: Optional[List[str]],
                        new_managed_channels: Optional[List[str]],
                        current_subscribe_channels: List[str],
                        new_subscribe_channels: List[str],
                        org_admin_user: str, org_admin_password: str) -> Dict[str, Dict[str, bool]]:
        """
        Compute the per-channel permission changes to apply.

        :param current_managed_channels: channel labels the user can manage now
        :param new_managed_channels: channel labels the user should manage
        :param current_subscribe_channels: channel labels the user can subscribe to now
        :param new_subscribe_channels: channel labels the user should subscribe to
        :param org_admin_user: organization administrator username
        :param org_admin_password: organization administrator password
        :return: dict with optional keys 'manageable_channels' and
                 'subscribable_channels', each mapping a channel label to
                 True (grant) or False (revoke)
        """
        managed_changes: Dict[str, bool] = {}
        # Grant manage permission on newly requested channels...
        managed_changes.update({new_ma: True for new_ma in (new_managed_channels or [])
                                if new_ma not in current_managed_channels})

        # ...and revoke it on channels no longer listed.
        managed_changes.update({old_ma: False for old_ma in (current_managed_channels or [])
                                if old_ma not in new_managed_channels})

        subscribe_changes: Dict[str, bool] = {}
        for new_channel in (new_subscribe_channels or []):
            # Subscribe when the channel is newly requested, or when its
            # manage permission is being revoked in this run — presumably
            # because the subscription must then be granted explicitly
            # (TODO confirm against the Uyuni API semantics).
            if new_channel not in (current_subscribe_channels or []) or not managed_changes.get(new_channel, True):
                subscribe_changes[new_channel] = True

        for curr_channel in (current_subscribe_channels or []):
            if not (curr_channel in new_subscribe_channels or curr_channel in new_managed_channels):
                # Only revoke the subscription when the channel is not
                # globally subscribable anyway.
                if not __salt__['uyuni.channel_software_is_globally_subscribable'](curr_channel,
                                                                                   org_admin_user,
                                                                                   org_admin_password):
                    subscribe_changes[curr_channel] = False
        changes = {}
        if managed_changes:
            changes['manageable_channels'] = managed_changes
        if subscribe_changes:
            changes['subscribable_channels'] = subscribe_changes
        return changes

    def manage(self, login: str, password: str,
               manageable_channels: Optional[List[str]] = [],
               subscribable_channels: Optional[List[str]] = [],
               org_admin_user: str = None, org_admin_password: str = None) -> Dict[str, Any]:
        """
        Modifies user-channel associations

        :param login: user login ID
        :param password: user password
        :param manageable_channels: channels user can manage
        :param subscribable_channels: channels user can subscribe
        :param org_admin_user: organization administrator username
        :param org_admin_password: organization administrator password
        :return: dict for Salt communication
        """
        try:
            current_roles = __salt__['uyuni.user_list_roles'](login, password=password)
            current_manageable_channels = __salt__['uyuni.channel_list_manageable_channels'](login, password)
            current_subscribe_channels = __salt__['uyuni.channel_list_my_channels'](login, password)
        except Exception as exc:
            return StateResult.state_error(login,
                                           comment="Error retrieving information about user channels '{}': {}".format(
                                               login, exc))

        # Admin-level users can manage all channels in the organization,
        # so per-channel flags cannot be changed for them.
        if "org_admin" in current_roles or "channel_admin" in current_roles:
            return StateResult.state_error(login, "Channels access cannot be changed, because "
                                                  "the target user can manage all channels in the organization "
                                                  "(having an \"org_admin\" or \"channel_admin\" role).")

        current_manageable_channels_list = [c.get("label") for c in (current_manageable_channels or [])]
        current_subscribe_channels_list = [c.get("label") for c in (current_subscribe_channels or [])]

        changes = self.process_changes(current_manageable_channels_list,
                                       manageable_channels,
                                       current_subscribe_channels_list, subscribable_channels,
                                       org_admin_user, org_admin_password)

        if not changes:
            return StateResult.prepare_result(login, True,
                                              "{0} channels are already in the desired state".format(login))
        if __opts__['test']:
            return StateResult.prepare_result(login, None, "{0} channels would be configured".format(login), changes)

        try:
            for channel, action in changes.get('manageable_channels', {}).items():
                __salt__['uyuni.channel_software_set_user_manageable'](channel, login, action,
                                                                       org_admin_user, org_admin_password)

            for channel, action in changes.get('subscribable_channels', {}).items():
                __salt__['uyuni.channel_software_set_user_subscribable'](channel, login, action,
                                                                         org_admin_user, org_admin_password)
        except Exception as exc:
            return StateResult.state_error(login, "Error changing channel assignments '{}': {}".format(login, exc))
        return StateResult.prepare_result(login, True, "Channel set to the desired state", changes)


class UyuniGroups:
    """
    Salt states to manage Uyuni system groups and their system membership
    through the ``uyuni.*`` execution module functions in ``__salt__``.
    """

    @staticmethod
    def _update_systems(name: str, new_systems: List[int], current_systems: List[int],
                        org_admin_user: str = None, org_admin_password: str = None):
        """
        Synchronize group membership: remove systems no longer wanted,
        then add systems missing from the group.

        :param name: group name
        :param new_systems: desired system IDs
        :param current_systems: system IDs currently in the group
        :param org_admin_user: organization administrator username
        :param org_admin_password: organization administrator password
        """

        remove_systems = [sys for sys in current_systems if sys not in new_systems]
        if remove_systems:
            __salt__['uyuni.systemgroup_add_remove_systems'](name, False, remove_systems,
                                                             org_admin_user=org_admin_user,
                                                             org_admin_password=org_admin_password)

        add_systems = [sys for sys in new_systems if sys not in current_systems]
        if add_systems:
            __salt__['uyuni.systemgroup_add_remove_systems'](name, True, add_systems,
                                                             org_admin_user=org_admin_user,
                                                             org_admin_password=org_admin_password)

    @staticmethod
    def _get_systems_for_group(target: str, target_type: str = "glob",
                               org_admin_user: str = None, org_admin_password: str = None):
        """
        Resolve a minion target expression to Uyuni system IDs.

        Minions matched on the master that have no entry in the
        minion-id -> system-id map are silently skipped.

        :param target: target expression used to select minions
        :param target_type: Salt target type (glob, grain, pillar, ...)
        :param org_admin_user: organization administrator username
        :param org_admin_password: organization administrator password
        :return: list of Uyuni system IDs
        """

        selected_minions = __salt__['uyuni.master_select_minions'](target, target_type)
        available_system_ids = __salt__['uyuni.systems_get_minion_id_map'](org_admin_user, org_admin_password)

        return [
            available_system_ids[minion_id] for minion_id in selected_minions.get('minions', [])
            if minion_id in available_system_ids
        ]

    def manage(self, name: str, description: str, target: str, target_type: str = "glob",
               org_admin_user: str = None, org_admin_password: str = None) -> Dict[str, Any]:
        """
        Create or update a system group

        :param name: group name
        :param description: group description
        :param target: target expression used to filter which minions should be part of the group
        :param target_type: target type, one of the following: glob, grain, grain_pcre, pillar, pillar_pcre,
                pillar_exact, compound, compound_pillar_exact. Default: glob.
        :param org_admin_user: organization administrator username
        :param org_admin_password: organization administrator password

        :return: dict for Salt communication
        """
        current_group = None
        current_systems = None
        try:
            current_group = __salt__['uyuni.systemgroup_get_details'](name,
                                                                      org_admin_user=org_admin_user,
                                                                      org_admin_password=org_admin_password)
            current_systems = __salt__['uyuni.systemgroup_list_systems'](name,
                                                                         org_admin_user=org_admin_user,
                                                                         org_admin_password=org_admin_password)
        except Exception as exc:
            # A missing group is expected (it will be created below); any
            # other fault is a real error.
            if exc.faultCode != SERVER_GROUP_NOT_FOUND_ERROR:
                return StateResult.state_error(name,
                                               "Error retrieving information about system group '{}': {}".format(name,
                                                                                                                 exc))

        current_systems_ids = [sys['id'] for sys in (current_systems or [])]
        systems_to_group = self._get_systems_for_group(target, target_type,
                                                       org_admin_user=org_admin_user,
                                                       org_admin_password=org_admin_password)

        changes = {}
        if description != (current_group or {}).get('description'):
            changes['description'] = {'new': description}
            if current_group:
                changes['description']['old'] = current_group["description"]

        # Compare membership ignoring order (Counter also counts duplicates)
        if Counter(current_systems_ids or []) != Counter(systems_to_group or []):
            changes['systems'] = {'new': systems_to_group}
            if current_group:
                changes['systems']['old'] = current_systems_ids

        if not changes:
            return StateResult.prepare_result(name, True, "{0} is already in the desired state".format(name))

        if not current_group:
            changes["name"] = {"new": name}

        if __opts__['test']:
            return StateResult.prepare_result(name, None, "{0} would be updated".format(name), changes)

        try:
            if current_group:
                __salt__['uyuni.systemgroup_update'](name, description,
                                                     org_admin_user=org_admin_user,
                                                     org_admin_password=org_admin_password)
            else:
                __salt__['uyuni.systemgroup_create'](name, description,
                                                     org_admin_user=org_admin_user,
                                                     org_admin_password=org_admin_password)

            # Membership synchronization is identical for the create and the
            # update path, so it is done once here (the original duplicated
            # this call in both branches).
            self._update_systems(name,
                                 systems_to_group,
                                 current_systems_ids,
                                 org_admin_user=org_admin_user,
                                 org_admin_password=org_admin_password)
        except Exception as exc:
            return StateResult.state_error(name, "Error updating group. '{}': {}".format(name, exc))
        else:
            return StateResult.prepare_result(name, True, "{0} successfully updated".format(name), changes)

    def delete(self, name: str, org_admin_user: str = None, org_admin_password: str = None) -> Dict[str, Any]:
        """
        Remove an Uyuni system group

        :param name: Group Name
        :param org_admin_user: organization administrator username
        :param org_admin_password: organization administrator password

        :return: dict for Salt communication
        """
        try:
            current_group = __salt__['uyuni.systemgroup_get_details'](name,
                                                                      org_admin_user=org_admin_user,
                                                                      org_admin_password=org_admin_password)
        except Exception as exc:
            if exc.faultCode == SERVER_GROUP_NOT_FOUND_ERROR:
                return StateResult.prepare_result(name, True, "{0} is already absent".format(name))
            if exc.faultCode == AUTHENTICATION_ERROR:
                return StateResult.state_error(name,
                                               "Error deleting group (organization admin credentials error) '{}': {}"
                                               .format(name, exc))
            # Unexpected fault: let the state system surface it
            raise exc
        else:
            if __opts__['test']:
                return StateResult.prepare_result(name, None, "{0} would be removed".format(name))
            try:
                __salt__['uyuni.systemgroup_delete'](name,
                                                     org_admin_user=org_admin_user,
                                                     org_admin_password=org_admin_password)
                return StateResult.prepare_result(name, True, "Group {} has been deleted".format(name),
                                                  {'name': {'old': current_group.get('name')},
                                                   'description': {'old': current_group.get('description')}})
            except Exception as exc:
                return StateResult.state_error(name, "Error deleting group '{}': {}".format(name, exc))


class UyuniOrgs:
    """
    Salt states to manage Uyuni organizations and their administrator user.
    Requires a SUSE Manager/Uyuni Administrator account.
    """

    @staticmethod
    def _compute_changes(user_changes: Dict[str, Any],
                         current_user: Dict[str, Any]) -> Dict[str, Any]:
        """
        Compute changes between the desired org-admin attributes and the
        current ones (email, first and last name only).

        :param user_changes: desired attributes
        :param current_user: current attributes, or None when the org admin
               does not exist yet
        :return: Salt changes dictionary
        """
        changes = {}
        for field in ["email", "first_name", "last_name"]:
            if (current_user or {}).get(field) != user_changes.get(field):
                changes[field] = {"new": user_changes[field]}
                if current_user:
                    changes[field]["old"] = (current_user or {}).get(field)
        return changes

    def manage(self, name: str, org_admin_user: str, org_admin_password: str, first_name: str,
               last_name: str, email: str, pam: bool = False,
               admin_user=None, admin_password=None) -> Dict[str, Any]:
        """
        Create or update an Uyuni organization.
        Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

        :param name: organization name
        :param org_admin_user: organization admin user
        :param org_admin_password: organization admin password
        :param first_name: organization admin first name
        :param last_name: organization admin last name
        :param email: organization admin email
        :param pam: organization admin pam authentication
        :param admin_user: uyuni admin user
        :param admin_password: uyuni admin password
        :return: dict for Salt communication
        """
        current_org = None
        current_org_admin = None
        try:
            current_org = __salt__['uyuni.org_get_details'](name,
                                                            admin_user=admin_user,
                                                            admin_password=admin_password)
            current_org_admin = __salt__['uyuni.user_get_details'](org_admin_user,
                                                                   org_admin_user=org_admin_user,
                                                                   org_admin_password=org_admin_password)
        except Exception as exc:
            # A missing organization is expected (it gets created below);
            # any other fault is a real error.
            if exc.faultCode != ORG_NOT_FOUND_ERROR:
                return StateResult.state_error(name,
                                               "Error retrieving information about organization '{}': {}".format(name,
                                                                                                                 exc))

        user_paramters = {"login": org_admin_user, "password": org_admin_password, "email": email,
                          "first_name": first_name, "last_name": last_name,
                          "org_admin_user": org_admin_user, "org_admin_password": org_admin_password}

        changes = self._compute_changes(user_paramters, current_org_admin)
        if not current_org:
            # New organization: report the creation parameters as changes
            changes["org_name"] = {"new": name}
            changes["org_admin_user"] = {"new": org_admin_user}
            changes["pam"] = {"new": pam}

        if not changes:
            return StateResult.prepare_result(name, True, "{0} is already in the desired state".format(name))
        if __opts__['test']:
            return StateResult.prepare_result(name, None, "{0} would be updated".format(name), changes)

        try:
            if current_org:
                # Existing org: only the admin user's details get updated
                __salt__['uyuni.user_set_details'](**user_paramters)
            else:
                __salt__['uyuni.org_create'](name=name,
                                             org_admin_user=org_admin_user, org_admin_password=org_admin_password,
                                             first_name=first_name, last_name=last_name, email=email,
                                             admin_user=admin_user, admin_password=admin_password, pam=pam)

        except Exception as exc:
            return StateResult.state_error(name, "Error updating organization '{}': {}".format(name, exc))
        else:
            return StateResult.prepare_result(name, True, "{0} org successfully modified".format(name), changes)

    def delete(self, name: str, admin_user=None, admin_password=None) -> Dict[str, Any]:
        """
        Remove an Uyuni organization
        Note: the configured admin user must have the SUSE Manager/Uyuni Administrator role to perform this action

        :param name: Organization Name
        :param admin_user: administrator username
        :param admin_password: administrator password

        :return: dict for Salt communication
        """
        try:
            current_org = __salt__['uyuni.org_get_details'](name,
                                                            admin_user=admin_user,
                                                            admin_password=admin_password)
        except Exception as exc:
            if exc.faultCode == ORG_NOT_FOUND_ERROR:
                return StateResult.prepare_result(name, True, "{0} is already absent".format(name))
            if exc.faultCode == AUTHENTICATION_ERROR:
                return StateResult.state_error(name,
                                               "Error deleting organization (admin credentials error) '{}': {}"
                                               .format(name, exc))
            # Unexpected fault: let the state system surface it
            raise exc
        else:
            if __opts__['test']:
                return StateResult.prepare_result(name, None, "{0} would be removed".format(name))
            try:
                __salt__['uyuni.org_delete'](name,
                                             admin_user=admin_user,
                                             admin_password=admin_password)
                return StateResult.prepare_result(name, True, "Org {} has been deleted".format(name),
                                                  {'name': {'old': current_org.get('name')}})
            except Exception as exc:
                return StateResult.state_error(name, "Error deleting Org '{}': {}".format(name, exc))


class UyuniOrgsTrust:
    """
    Salt state to manage trust relationships between Uyuni organizations.
    """

    def trust(self, name: str, org_name: str, trusted_orgs: List[str],
              admin_user: str = None, admin_password: str = None) -> Dict[str, Any]:
        """
        Establish trust relationships between organizations

        :param name: state name
        :param org_name: organization name
        :param trusted_orgs: list of organization names to trust
        :param admin_user: administrator username
        :param admin_password: administrator password

        :return: dict for Salt communication
        """
        try:
            current_org_trusts = __salt__['uyuni.org_trust_list_trusts'](org_name,
                                                                         admin_user=admin_user,
                                                                         admin_password=admin_password)
            current_org = __salt__['uyuni.org_get_details'](org_name,
                                                            admin_user=admin_user, admin_password=admin_password)
        except Exception as exc:
            return StateResult.state_error(name,
                                           "Error retrieving information about an organization trust'{}': {}".format(
                                               org_name, exc))

        # Partition the current trust entries into those to enable and
        # those to disable, based on the desired trusted_orgs list.
        trusts_to_add = []
        trusts_to_remove = []
        for org_trust in current_org_trusts:
            if org_trust.get("orgName") in (trusted_orgs or []) and not org_trust.get("trustEnabled"):
                trusts_to_add.append(org_trust)
            elif org_trust.get("orgName") not in (trusted_orgs or []) and org_trust.get("trustEnabled"):
                trusts_to_remove.append(org_trust)

        if not trusts_to_add and not trusts_to_remove:
            return StateResult.prepare_result(name, True, "{0} is already in the desired state".format(org_name))
        if __opts__['test']:
            changes = {}
            for org_add in trusts_to_add:
                changes[org_add.get("orgName")] = {'old': None, 'new': True}
            for org_remove in trusts_to_remove:
                changes[org_remove.get("orgName")] = {'old': True, 'new': None}
            return StateResult.prepare_result(name, None, "{0} would be created".format(org_name), changes)

        # Record changes as they are applied, so a partial failure still
        # reports what was done before the error occurred.
        processed_changes = {}
        try:
            for org_add in trusts_to_add:
                __salt__['uyuni.org_trust_add_trust'](current_org.get("id"), org_add.get("orgId"),
                                                      admin_user=admin_user, admin_password=admin_password)
                processed_changes[org_add.get("orgName")] = {'old': None, 'new': True}
            for org_remove in trusts_to_remove:
                __salt__['uyuni.org_trust_remove_trust'](current_org.get("id"), org_remove.get("orgId"),
                                                         admin_user=admin_user, admin_password=admin_password)
                processed_changes[org_remove.get("orgName")] = {'old': True, 'new': None}
        except Exception as exc:
            return StateResult.prepare_result(name, False,
                                              "Error updating organization trusts '{}': {}".format(org_name, exc),
                                              processed_changes)
        return StateResult.prepare_result(name, True, "Org '{}' trusts successfully modified".format(org_name),
                                          processed_changes)


class UyuniActivationKeys:
    """
    Manage Uyuni Activation Keys (create, update, delete) through the
    ``uyuni`` execution module.
    """

    @staticmethod
    def _normalize_list_packages(list_packages: List[Any]):
        """
        Normalize a list of package dicts into comparable tuples.

        :param list_packages: list of dicts with a 'name' key and an optional 'arch' key
        :return: list of (name, arch) tuples, arch being None when not set
        """
        return [(f['name'], f.get('arch', None)) for f in (list_packages or [])]

    @staticmethod
    def _compute_changes(ak_parameters: Dict[str, Any],
                         current_ak: Dict[str, Any],
                         configure_after_registration: bool,
                         current_configure_after_registration: bool,
                         current_config_channels: List[str],
                         configuration_channels: List[str]) -> Dict[str, Any]:
        """
        Compute the difference between the desired activation key settings
        and the current ones, in the {'field': {'old': ..., 'new': ...}}
        layout used for Salt state changes reporting.

        :param ak_parameters: desired activation key values
        :param current_ak: current activation key values ({} when the key does not exist)
        :param configure_after_registration: desired config-deployment flag
        :param current_configure_after_registration: current config-deployment flag
                                                     (None when the key does not exist)
        :param current_config_channels: currently assigned configuration channel labels
        :param configuration_channels: desired configuration channel labels
        :return: dict of detected changes, empty when already in the desired state
        """
        changes = {}
        # scalar fields: plain equality comparison
        for field in ["description", 'base_channel', 'usage_limit', 'universal_default', 'contact_method']:
            if current_ak.get(field) != ak_parameters.get(field):
                changes[field] = {"new": ak_parameters.get(field)}
                if current_ak:
                    changes[field]["old"] = current_ak.get(field)

        # list fields: order is irrelevant, so compare sorted copies
        for field in ['system_types', 'child_channels', 'server_groups']:
            if sorted((ak_parameters or {}).get(field) or []) != sorted(current_ak.get(field) or []):
                changes[field] = {"new": ak_parameters.get(field)}
                if current_ak:
                    changes[field]["old"] = current_ak.get(field)

        # packages: compare on normalized (name, arch) tuples so that
        # equivalent dicts with different key order/extra formatting match
        new_packages = UyuniActivationKeys._normalize_list_packages((ak_parameters or {}).get('packages', []))
        old_packages = UyuniActivationKeys._normalize_list_packages((current_ak or {}).get('packages', []))
        if sorted(new_packages) != sorted(old_packages):
            changes['packages'] = {"new": ak_parameters.get('packages')}
            if current_ak:
                changes['packages']["old"] = current_ak.get('packages')

        if configure_after_registration != current_configure_after_registration:
            changes['configure_after_registration'] = {"new": configure_after_registration}
            # None means "key does not exist yet", so there is no old value to report
            if current_configure_after_registration is not None:
                changes['configure_after_registration']["old"] = current_configure_after_registration

        # we don't want to sort configuration channels since the order matters in this case
        if (current_config_channels or []) != (configuration_channels or []):
            changes['configuration_channels'] = {"new": configuration_channels}
            if current_config_channels:
                changes['configuration_channels']['old'] = current_config_channels

        return changes

    @staticmethod
    def _update_system_type(current_system_types, new_system_types,
                            key, org_admin_user, org_admin_password):
        """
        Sync activation key entitlements: add the missing ones and remove
        the ones no longer desired.
        """
        add_system_types = [t for t in new_system_types if t not in current_system_types]
        if add_system_types:
            __salt__['uyuni.activation_key_add_entitlements'](key, add_system_types,
                                                              org_admin_user=org_admin_user,
                                                              org_admin_password=org_admin_password)

        remove_system_types = [t for t in current_system_types if t not in new_system_types]
        if remove_system_types:
            __salt__['uyuni.activation_key_remove_entitlements'](key, remove_system_types,
                                                                 org_admin_user=org_admin_user,
                                                                 org_admin_password=org_admin_password)

    @staticmethod
    def _update_child_channels(current_child_channels, new_child_channels,
                               key, org_admin_user, org_admin_password):
        """
        Sync activation key child channels: add the missing ones and remove
        the ones no longer desired.
        """
        add_child_channels = [t for t in new_child_channels if t not in current_child_channels]
        if add_child_channels:
            __salt__['uyuni.activation_key_add_child_channels'](key, add_child_channels,
                                                                org_admin_user=org_admin_user,
                                                                org_admin_password=org_admin_password)

        remove_child_channels = [t for t in current_child_channels if t not in new_child_channels]
        if remove_child_channels:
            __salt__['uyuni.activation_key_remove_child_channels'](key, remove_child_channels,
                                                                   org_admin_user=org_admin_user,
                                                                   org_admin_password=org_admin_password)

    @staticmethod
    def _update_server_groups(current_server_groups, new_server_groups,
                              key, org_admin_user, org_admin_password):
        """
        Sync activation key server groups (by id): add the missing ones and
        remove the ones no longer desired.
        """
        add_server_groups = [t for t in new_server_groups if t not in current_server_groups]
        if add_server_groups:
            __salt__['uyuni.activation_key_add_server_groups'](key, add_server_groups,
                                                               org_admin_user=org_admin_user,
                                                               org_admin_password=org_admin_password)

        remove_server_groups = [t for t in current_server_groups if t not in new_server_groups]
        if remove_server_groups:
            __salt__['uyuni.activation_key_remove_server_groups'](key, remove_server_groups,
                                                                  org_admin_user=org_admin_user,
                                                                  org_admin_password=org_admin_password)

    @staticmethod
    def _format_packages_data(packages):
        """
        Convert normalized (name, arch) tuples back to the dict format the
        XML-RPC API expects; 'arch' is omitted when None.
        """
        return [{'name': f[0], **(({'arch': f[1]}) if f[1] else {})} for f in packages]

    @staticmethod
    def _update_packages(current_packages, new_packages, key, org_admin_user, org_admin_password):
        """
        Sync activation key packages: add the missing ones and remove the
        ones no longer desired, comparing on normalized (name, arch) tuples.
        """
        new_packages_normalized = UyuniActivationKeys._normalize_list_packages(new_packages)
        current_packages_normalized = UyuniActivationKeys._normalize_list_packages(current_packages)
        add_packages = [t for t in new_packages_normalized if t not in current_packages_normalized]
        if add_packages:
            __salt__['uyuni.activation_key_add_packages'](key,
                                                          UyuniActivationKeys._format_packages_data(add_packages),
                                                          org_admin_user=org_admin_user,
                                                          org_admin_password=org_admin_password)

        remove_packages = [t for t in current_packages_normalized if t not in new_packages_normalized]
        if remove_packages:
            __salt__['uyuni.activation_key_remove_packages'](key,
                                                             UyuniActivationKeys._format_packages_data(remove_packages),
                                                             org_admin_user=org_admin_user,
                                                             org_admin_password=org_admin_password)

    def manage(self, name: str, description: str,
               base_channel: str = '',
               usage_limit: int = 0,
               contact_method: str = 'default',
               system_types: List[str] = [],
               universal_default: bool = False,
               child_channels: List[str] = [],
               configuration_channels: List[str] = [],
               packages: List[str] = [],
               server_groups: List[str] = [],
               configure_after_registration: bool = False,
               org_admin_user: str = None, org_admin_password: str = None) -> Dict[str, Any]:
        """
        Ensure an Uyuni Activation Key is present.

        :param name: the Activation Key name
        :param description: the Activation description
        :param base_channel: base channel to be used
        :param usage_limit: activation key usage limit
        :param contact_method: contact method to be used. Can be one of: 'default', 'ssh-push' or 'ssh-push-tunnel'
        :param system_types: system types to be assigned.
                             Can be one of: 'virtualization_host', 'container_build_host',
                             'monitoring_entitled', 'osimage_build_host', 'virtualization_host'
        :param universal_default: sets this activation key as organization universal default
        :param child_channels: list of child channels to be assigned
        :param configuration_channels: list of configuration channels to be assigned
        :param packages: list of packages which will be installed
        :param server_groups: list of server groups to assign the activation key with
        :param configure_after_registration: deploy configuration files to systems on registration
        :param org_admin_user: organization administrator username
        :param org_admin_password: organization administrator password

        :return:  dict for Salt communication
        """
        current_ak = {}
        key = None
        current_configure_after_registration = None
        system_groups_keys = {}
        current_config_channels = []
        # mapping from XML-RPC field names to the names used in this state
        output_field_names = {
            'description': 'description',
            'base_channel_label': 'base_channel',
            'usage_limit': 'usage_limit',
            'universal_default': 'universal_default',
            'contact_method': 'contact_method',
            'entitlements': 'system_types',
            'child_channel_labels': 'child_channels',
            'server_group_ids': 'server_groups',
            'packages': 'packages'
        }
        try:
            all_groups = __salt__['uyuni.systemgroup_list_all_groups'](org_admin_user, org_admin_password)
            group_id_to_name = {}
            for g in (all_groups or []):
                system_groups_keys[g.get('name')] = g.get('id')
                group_id_to_name[g.get('id')] = g.get('name')

            current_org_user = __salt__['uyuni.user_get_details'](org_admin_user, org_admin_password)

            # activation keys are always prefixed with the organization id
            key = "{}-{}".format(current_org_user['org_id'], name)
            returned_ak = __salt__['uyuni.activation_key_get_details'](key, org_admin_user=org_admin_user,
                                                                       org_admin_password=org_admin_password)

            for returned_name, output_name in output_field_names.items():
                current_ak[output_name] = returned_ak[returned_name]

            # the API returns server group ids; translate them to names for comparison
            current_ak['server_groups'] = [group_id_to_name[s] for s in (current_ak['server_groups'] or [])]

            # the API reports 'none' when no base channel is set; normalize to ''
            if current_ak.get('base_channel', None) == 'none':
                current_ak['base_channel'] = ''

            current_configure_after_registration = __salt__['uyuni.activation_key_check_config_deployment'](key,
                                                                                                            org_admin_user,
                                                                                                            org_admin_password)

            config_channels_output = __salt__['uyuni.activation_key_list_config_channels'](key,
                                                                                            org_admin_user,
                                                                                            org_admin_password)
            current_config_channels = [cc['label'] for cc in (config_channels_output or [])]

        except Exception as exc:
            # "key not found" just means it must be created; anything else
            # (including exceptions without a faultCode attribute) is an error.
            if getattr(exc, 'faultCode', None) != ACTIVATION_KEY_NOT_FOUND_ERROR:
                return StateResult.state_error(key, "Error retrieving information about Activation Key '{}': {}".format(key, exc))

        ak_parameters = {'description': description,
                         'base_channel': base_channel,
                         'usage_limit': usage_limit,
                         'contact_method': contact_method,
                         'system_types': system_types,
                         'universal_default': universal_default,
                         'child_channels': child_channels,
                         'server_groups': server_groups,
                         'packages': packages}

        changes = self._compute_changes(ak_parameters, current_ak,
                                        configure_after_registration,
                                        current_configure_after_registration,
                                        current_config_channels,
                                        configuration_channels)

        if not current_ak:
            changes["key"] = {"new": key}

        if not changes:
            return StateResult.prepare_result(key, True, "{0} is already in the desired state".format(key))
        if __opts__['test']:
            return StateResult.prepare_result(key, None, "{0} would be updated".format(key), changes)

        try:
            if current_ak:
                __salt__['uyuni.activation_key_set_details'](key,
                                                             description=description,
                                                             contact_method=contact_method,
                                                             base_channel_label=base_channel,
                                                             usage_limit=usage_limit,
                                                             universal_default=universal_default,
                                                             org_admin_user=org_admin_user,
                                                             org_admin_password=org_admin_password)

                if changes.get('system_types', False):
                    self._update_system_type(current_ak.get('system_types', []), system_types or [],
                                             key, org_admin_user, org_admin_password)

            else:
                # on creation the API prepends the org id itself, so pass the plain name
                __salt__['uyuni.activation_key_create'](key=name,
                                                        description=description,
                                                        base_channel_label=base_channel,
                                                        usage_limit=usage_limit,
                                                        system_types=system_types,
                                                        universal_default=universal_default,
                                                        org_admin_user=org_admin_user,
                                                        org_admin_password=org_admin_password)

                # contact_method and usage_limit cannot be set at creation time
                __salt__['uyuni.activation_key_set_details'](key, contact_method=contact_method,
                                                             usage_limit=usage_limit,
                                                             org_admin_user=org_admin_user,
                                                             org_admin_password=org_admin_password)

            if changes.get('child_channels', False):
                self._update_child_channels(current_ak.get('child_channels', []),
                                            child_channels or [],
                                            key, org_admin_user, org_admin_password)

            if changes.get('server_groups', False):
                # translate group names back to ids for the API calls;
                # unknown group names will raise a KeyError handled below
                old_server_groups_id = [system_groups_keys[s] for s in current_ak.get('server_groups', [])]
                new_server_groups_id = [system_groups_keys[s] for s in (server_groups or [])]
                self._update_server_groups(old_server_groups_id,
                                           new_server_groups_id,
                                           key, org_admin_user, org_admin_password)

            if changes.get('configure_after_registration', False):
                if configure_after_registration:
                    __salt__['uyuni.activation_key_enable_config_deployment'](key,
                                                                              org_admin_user=org_admin_user,
                                                                              org_admin_password=org_admin_password)
                else:
                    if current_ak:
                        __salt__['uyuni.activation_key_disable_config_deployment'](key,
                                                                                   org_admin_user=org_admin_user,
                                                                                   org_admin_password=org_admin_password)

            if changes.get('packages', False):
                self._update_packages(current_ak.get('packages', []),
                                      packages or [],
                                      key, org_admin_user, org_admin_password)

            if changes.get('configuration_channels', False):
                __salt__['uyuni.activation_key_set_config_channels']([key],
                                                                     config_channel_label=configuration_channels,
                                                                     org_admin_user=org_admin_user,
                                                                     org_admin_password=org_admin_password)

        except Exception as exc:
            return StateResult.state_error(key, "Error updating activation key '{}': {}".format(key, exc))
        else:
            return StateResult.prepare_result(key, True, "{0} activation key successfully modified".format(key), changes)

    def delete(self, name: str, org_admin_user: str = None, org_admin_password: str = None) -> Dict[str, Any]:
        """
        Remove an Uyuni Activation Key.

        :param name: the Activation Key Name
        :param org_admin_user: organization administrator username
        :param org_admin_password: organization administrator password

        :return: dict for Salt communication
        """
        # initialize key so the except block below never hits an unbound name
        # when user_get_details itself raises (e.g. on bad credentials)
        key = None
        try:
            current_org_user = __salt__['uyuni.user_get_details'](org_admin_user, org_admin_password)
            key = "{}-{}".format(current_org_user['org_id'], name)
            # existence check only: raises ACTIVATION_KEY_NOT_FOUND_ERROR when absent
            __salt__['uyuni.activation_key_get_details'](key, org_admin_user=org_admin_user,
                                                         org_admin_password=org_admin_password)
        except Exception as exc:
            fault_code = getattr(exc, 'faultCode', None)
            if fault_code == ACTIVATION_KEY_NOT_FOUND_ERROR:
                return StateResult.prepare_result(name, True, "{0} is already absent".format(key))
            if fault_code == AUTHENTICATION_ERROR:
                # key may still be None if credentials were rejected before it was built
                return StateResult.state_error(name,
                                               "Error deleting Activation Key (organization credentials error) '{}': {}"
                                               .format(key or name, exc))
            raise exc
        else:
            changes = {
                'id': {'old': key},
            }
            if __opts__['test']:
                return StateResult.prepare_result(name, None, "{0} would be deleted".format(key), changes)

            try:
                __salt__['uyuni.activation_key_delete'](key,
                                                        org_admin_user=org_admin_user,
                                                        org_admin_password=org_admin_password)
                return StateResult.prepare_result(name, True, "Activation Key {} has been deleted".format(key), changes)
            except Exception as exc:
                return StateResult.state_error(name, "Error deleting Activation Key '{}': {}".format(key, exc))


def __virtual__():
    # Salt loader hook: expose this state module under the configured virtual name.
    return __virtualname__


def user_present(name, password, email, first_name, last_name, use_pam_auth=False,
                 roles=None, system_groups=None,
                 org_admin_user=None, org_admin_password=None):
    """
    Ensure an Uyuni user exists with the given attributes, creating or
    updating it as needed.

    :param name: user login name
    :param password: desired password for the user
    :param email: valid email address
    :param first_name: First name
    :param last_name: Last name
    :param use_pam_auth: if you wish to use PAM authentication for this user
    :param roles: roles to assign to user
    :param system_groups: system_groups to assign to user
    :param org_admin_user: organization administrator username
    :param org_admin_password: organization administrator password

    :return: dict for Salt communication
    """
    users_state = UyuniUsers()
    return users_state.manage(name, password, email, first_name, last_name,
                              use_pam_auth, roles, system_groups,
                              org_admin_user, org_admin_password)


def user_channels(name, password,
                  manageable_channels=[], subscribable_channels=[],
                  org_admin_user=None, org_admin_password=None):
    """
    Ensure a user can manage and subscribe to the specified channels.

    :param name: user login name
    :param password: user password
    :param manageable_channels: channels user can manage
    :param subscribable_channels: channels user can subscribe
    :param org_admin_user: organization administrator username
    :param org_admin_password: organization administrator password

    :return: dict for Salt communication
    """
    channels_state = UyuniUserChannels()
    return channels_state.manage(name, password,
                                 manageable_channels, subscribable_channels,
                                 org_admin_user, org_admin_password)


def user_absent(name, org_admin_user=None, org_admin_password=None):
    """
    Ensure the given Uyuni user does not exist, deleting it if present.

    :param name: user login name
    :param org_admin_user: organization administrator username
    :param org_admin_password: organization administrator password

    :return:  dict for Salt communication
    """
    users_state = UyuniUsers()
    return users_state.delete(name, org_admin_user, org_admin_password)


def org_present(name, org_admin_user, org_admin_password,
                first_name, last_name, email, pam=False,
                admin_user=None, admin_password=None):
    """
    Ensure an Uyuni organization exists with the given settings, creating
    or updating it as needed.
    Note: the configured admin user must have the SUSE Manager/Uyuni
    Administrator role to perform this action.

    :param name: organization name
    :param org_admin_user: organization admin user
    :param org_admin_password: organization admin password
    :param first_name: organization admin first name
    :param last_name: organization admin last name
    :param email: organization admin email
    :param pam: organization admin pam authentication
    :param admin_user: uyuni admin user
    :param admin_password: uyuni admin password

    :return: dict for Salt communication
    """
    orgs_state = UyuniOrgs()
    return orgs_state.manage(name, org_admin_user, org_admin_password,
                             first_name, last_name, email, pam,
                             admin_user, admin_password)


def org_absent(name, admin_user=None, admin_password=None):
    """
    Ensure the given Uyuni organization does not exist, deleting it if present.
    Note: the configured admin user must have the SUSE Manager/Uyuni
    Administrator role to perform this action.

    :param name: organization name
    :param admin_user: uyuni admin user
    :param admin_password: uyuni admin password

    :return: dict for Salt communication
    """
    orgs_state = UyuniOrgs()
    return orgs_state.delete(name, admin_user, admin_password)


def org_trust(name, org_name, trusts, admin_user=None, admin_password=None):
    """
    Ensure the given Uyuni organization trusts exactly the listed
    organizations, adding and removing trust relationships as needed.

    :param name: state name
    :param org_name: Organization name
    :param trusts: list of organization names to trust
    :param admin_user: administrator username
    :param admin_password: administrator password

    :return: dict for Salt communication
    """
    trust_state = UyuniOrgsTrust()
    return trust_state.trust(name, org_name, trusts, admin_user, admin_password)


def group_present(name, description, target=None, target_type="glob",
                  org_admin_user=None, org_admin_password=None):
    """
    Ensure an Uyuni system group exists with the given settings, creating
    or updating it as needed.

    :param name: group name
    :param description: group description
    :param target: target expression used to filter which minions should be part of the group
    :param target_type: target type, one of the following: glob, grain, grain_pcre, pillar, pillar_pcre,
            pillar_exact, compound, compound_pillar_exact. Default: glob.
    :param org_admin_user: organization administrator username
    :param org_admin_password: organization administrator password

    :return: dict for Salt communication
    """
    groups_state = UyuniGroups()
    return groups_state.manage(name, description, target, target_type,
                               org_admin_user, org_admin_password)


def group_absent(name, org_admin_user=None, org_admin_password=None):
    """
    Ensure the given Uyuni system group does not exist, deleting it if present.

    :param name: Group Name
    :param org_admin_user: organization administrator username
    :param org_admin_password: organization administrator password

    :return: dict for Salt communication
    """
    groups_state = UyuniGroups()
    return groups_state.delete(name, org_admin_user, org_admin_password)


def activation_key_absent(name, org_admin_user=None, org_admin_password=None):
    """
    Ensure the given Uyuni Activation Key does not exist, deleting it if present.

    :param name: the Activation Key name
    :param org_admin_user: organization administrator username
    :param org_admin_password: organization administrator password

    :return:  dict for Salt communication
    """
    ak_state = UyuniActivationKeys()
    return ak_state.delete(name, org_admin_user, org_admin_password)


def activation_key_present(name,
                           description,
                           base_channel='',
                           usage_limit=0,
                           contact_method='default',
                           system_types=None,
                           universal_default=False,
                           child_channels=None,
                           configuration_channels=None,
                           packages=None,
                           server_groups=None,
                           configure_after_registration=False,
                           org_admin_user=None, org_admin_password=None):
    """
    Ensure an Uyuni Activation Key is present.

    :param name: the Activation Key name
    :param description: the Activation description
    :param base_channel: base channel to be used
    :param usage_limit: activation key usage limit. Default value is 0, which means unlimited usage
    :param contact_method: contact method to be used. Can be one of: 'default', 'ssh-push' or 'ssh-push-tunnel'
    :param system_types: system types to be assigned (default: empty list).
                         Can be one of: 'virtualization_host', 'container_build_host',
                         'monitoring_entitled', 'osimage_build_host', 'virtualization_host'
    :param universal_default: sets this activation key as organization universal default
    :param child_channels: list of child channels to be assigned (default: empty list)
    :param configuration_channels: list of configuration channels to be assigned (default: empty list)
    :param packages: list of packages which will be installed (default: empty list)
    :param server_groups: list of server groups to assign the activation key with (default: empty list)
    :param configure_after_registration: deploy configuration files to systems on registration
    :param org_admin_user: organization administrator username
    :param org_admin_password: organization administrator password

    :return:  dict for Salt communication
    """
    # The list parameters previously defaulted to mutable [] literals, which
    # are shared across calls in Python. Use None sentinels instead and
    # substitute a fresh empty list per invocation ("x or []" also maps an
    # explicit empty list to an equivalent empty list, preserving behavior).
    return UyuniActivationKeys().manage(name, description,
                                        base_channel=base_channel,
                                        usage_limit=usage_limit,
                                        contact_method=contact_method,
                                        system_types=system_types or [],
                                        universal_default=universal_default,
                                        child_channels=child_channels or [],
                                        configuration_channels=configuration_channels or [],
                                        packages=packages or [],
                                        server_groups=server_groups or [],
                                        configure_after_registration=configure_after_registration,
                                        org_admin_user=org_admin_user,
                                        org_admin_password=org_admin_password)
070701000000D4000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001A00000000susemanager-sls/src/tests 070701000000D5000081B400000000000000000000000160C1E96E000000A8000000000000000000000000000000000000002400000000susemanager-sls/src/tests/README.md   ## Running tests

Run tests from _this_ directory. PyTest must be installed.
To run the tests, issue the following command:

  py.test <ENTER>

That's all for now.
070701000000D6000081B400000000000000000000000160C1E96E00000000000000000000000000000000000000000000002600000000susemanager-sls/src/tests/__init__.py 070701000000D7000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001F00000000susemanager-sls/src/tests/data    070701000000D8000081B400000000000000000000000160C1E96E000000ED000000000000000000000000000000000000003600000000susemanager-sls/src/tests/data/cpuinfo.ppc64le.sample processor	: 0
cpu		: POWER8E (raw), altivec supported
clock		: 3425.000000MHz
revision	: 2.1 (pvr 004b 0201)

timebase	: 512000000
platform	: pSeries
model		: IBM pSeries (emulated by qemu)
machine		: CHRP IBM pSeries (emulated by qemu)
   070701000000D9000081B400000000000000000000000160C1E96E00000303000000000000000000000000000000000000003300000000susemanager-sls/src/tests/data/cpuinfo.s390.sample    vendor_id       : IBM/S390
# processors    : 1
bogomips per cpu: 2913.00
features : esan3 zarch stfle msa ldisp eimm dfp etf3eh highgprs
cache0          : level=1 type=Data scope=Private size=96K line_size=256 associativity=6
cache1          : level=1 type=Instruction scope=Private size=64K line_size=256 associativity=4
cache2          : level=2 type=Data scope=Private size=1024K line_size=256 associativity=8
cache3          : level=2 type=Instruction scope=Private size=1024K line_size=256 associativity=8
cache4          : level=3 type=Unified scope=Shared size=49152K line_size=256 associativity=12
cache5          : level=4 type=Unified scope=Shared size=393216K line_size=256 associativity=24
processor 0: version = FF,  identification = 0F9A27,  machine = 2827
 070701000000DA000081B400000000000000000000000160C1E96E000010C4000000000000000000000000000000000000002E00000000susemanager-sls/src/tests/data/cpuinfo.sample processor	: 0
vendor_id	: GenuineIntel
cpu family	: 6
model		: 61
model name	: Intel(R) Core(TM) i7-5600U CPU @ 2.60GHz
stepping	: 4
microcode	: 0x22
cpu MHz		: 1314.117
cache size	: 4096 KB
physical id	: 0
siblings	: 4
core id		: 0
cpu cores	: 2
apicid		: 0
initial apicid	: 0
fpu		: yes
fpu_exception	: yes
cpuid level	: 20
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt dtherm ida arat pln pts
bugs		:
bogomips	: 5187.99
clflush size	: 64
cache_alignment	: 64
address sizes	: 39 bits physical, 48 bits virtual
power management:

processor	: 1
vendor_id	: GenuineIntel
cpu family	: 6
model		: 61
model name	: Intel(R) Core(TM) i7-5600U CPU @ 2.60GHz
stepping	: 4
microcode	: 0x22
cpu MHz		: 2100.109
cache size	: 4096 KB
physical id	: 0
siblings	: 4
core id		: 0
cpu cores	: 2
apicid		: 1
initial apicid	: 1
fpu		: yes
fpu_exception	: yes
cpuid level	: 20
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt dtherm ida arat pln pts
bugs		:
bogomips	: 5187.99
clflush size	: 64
cache_alignment	: 64
address sizes	: 39 bits physical, 48 bits virtual
power management:

processor	: 2
vendor_id	: GenuineIntel
cpu family	: 6
model		: 61
model name	: Intel(R) Core(TM) i7-5600U CPU @ 2.60GHz
stepping	: 4
microcode	: 0x22
cpu MHz		: 1718.742
cache size	: 4096 KB
physical id	: 0
siblings	: 4
core id		: 1
cpu cores	: 2
apicid		: 2
initial apicid	: 2
fpu		: yes
fpu_exception	: yes
cpuid level	: 20
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt dtherm ida arat pln pts
bugs		:
bogomips	: 5187.99
clflush size	: 64
cache_alignment	: 64
address sizes	: 39 bits physical, 48 bits virtual
power management:

processor	: 3
vendor_id	: GenuineIntel
cpu family	: 6
model		: 61
model name	: Intel(R) Core(TM) i7-5600U CPU @ 2.60GHz
stepping	: 4
microcode	: 0x22
cpu MHz		: 2108.335
cache size	: 4096 KB
physical id	: 0
siblings	: 4
core id		: 1
cpu cores	: 2
apicid		: 3
initial apicid	: 3
fpu		: yes
fpu_exception	: yes
cpuid level	: 20
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt dtherm ida arat pln pts
bugs		:
bogomips	: 5187.99
clflush size	: 64
cache_alignment	: 64
address sizes	: 39 bits physical, 48 bits virtual
power management:

070701000000DB000081B400000000000000000000000160C1E96E000006CF000000000000000000000000000000000000003000000000susemanager-sls/src/tests/data/dmidecode.sample   # dmidecode 3.0
Getting SMBIOS data from sysfs.
SMBIOS 2.7 present.

Handle 0x0004, DMI type 4, 42 bytes
Processor Information
	Socket Designation: U3E1
	Type: Central Processor
	Family: Core i7
	Manufacturer: Intel(R) Corporation
	ID: D4 06 03 00 FF FB EB BF
	Signature: Type 0, Family 6, Model 61, Stepping 4
	Flags:
		FPU (Floating-point unit on-chip)
		VME (Virtual mode extension)
		DE (Debugging extension)
		PSE (Page size extension)
		TSC (Time stamp counter)
		MSR (Model specific registers)
		PAE (Physical address extension)
		MCE (Machine check exception)
		CX8 (CMPXCHG8 instruction supported)
		APIC (On-chip APIC hardware supported)
		SEP (Fast system call)
		MTRR (Memory type range registers)
		PGE (Page global enable)
		MCA (Machine check architecture)
		CMOV (Conditional move instruction supported)
		PAT (Page attribute table)
		PSE-36 (36-bit page size extension)
		CLFSH (CLFLUSH instruction supported)
		DS (Debug store)
		ACPI (ACPI supported)
		MMX (MMX technology supported)
		FXSR (FXSAVE and FXSTOR instructions supported)
		SSE (Streaming SIMD extensions)
		SSE2 (Streaming SIMD extensions 2)
		SS (Self-snoop)
		HTT (Multi-threading)
		TM (Thermal monitor supported)
		PBE (Pending break enabled)
	Version: Intel(R) Core(TM) i7-5600U CPU @ 2.60GHz
	Voltage: 1.1 V
	External Clock: 100 MHz
	Max Speed: 3600 MHz
	Current Speed: 2600 MHz
	Status: Populated, Enabled
	Upgrade: Socket BGA1168
	L1 Cache Handle: 0x0005
	L2 Cache Handle: 0x0006
	L3 Cache Handle: 0x0007
	Serial Number: None
	Asset Tag: None
	Part Number: None
	Core Count: 2
	Core Enabled: 2
	Thread Count: 4
	Characteristics:
		64-bit capable
		Multi-Core
		Hardware Thread
		Execute Protection
		Enhanced Virtualization
		Power/Performance Control

 070701000000DC000081B400000000000000000000000160C1E96E000000D7000000000000000000000000000000000000003500000000susemanager-sls/src/tests/data/livepatching-1.sample  kgraft_patch_1_2_2
    active: 1
    RPM: kgraft-patch-3_12_62-60_64_8-default-1-2.2.x86_64
    CVE: (none - this is an initial kGraft patch)
    bug fixes and enhancements: (none)

kgraft_patch_2_2_1
    active: 0
 070701000000DD000081B400000000000000000000000160C1E96E000000CA000000000000000000000000000000000000003500000000susemanager-sls/src/tests/data/livepatching-2.sample  kgraft_patch_1_2_2
    active: 0

kgraft_patch_2_2_1
    active: 1
    RPM: kgraft-patch-3_12_62-60_64_8-default-2-2.1.x86_64
    CVE: CVE-2016-8666 CVE-2016-6480
    bug fixes and enhancements: (none)
  070701000000DE000081B400000000000000000000000160C1E96E000000C7000000000000000000000000000000000000003400000000susemanager-sls/src/tests/data/lscpu.ppc64le.sample   # The following is the parsable format, which can be fed to other
# programs. Each different item in every column has an unique ID
# starting from zero.
# CPU,Core,Socket,Node,,L1d,L1i
0,0,0,0,,0,0

 070701000000DF000081B400000000000000000000000160C1E96E000000D1000000000000000000000000000000000000003100000000susemanager-sls/src/tests/data/lscpu.s390.sample  # The following is the parsable format, which can be fed to other
# programs. Each different item in every column has an unique ID
# starting from zero.
# CPU,Core,Socket,Node,,L1d,L1i,L2d,L2i
0,0,0,,,0,0,0,0
   070701000000E0000081B400000000000000000000000160C1E96E00000103000000000000000000000000000000000000002C00000000susemanager-sls/src/tests/data/lscpu.sample   # The following is the parsable format, which can be fed to other
# programs. Each different item in every column has an unique ID
# starting from zero.
# CPU,Core,Socket,Node,,L1d,L1i,L2,L3
0,0,0,0,,0,0,0,0
1,0,0,0,,0,0,0,0
2,1,0,0,,1,1,1,0
3,1,0,0,,1,1,1,0
 070701000000E1000081B400000000000000000000000160C1E96E000004D9000000000000000000000000000000000000002B00000000susemanager-sls/src/tests/data/udev.sample    P: /devices/LNXSYSTM:00/LNXPWRBN:00
E: DEVPATH=/devices/LNXSYSTM:00/LNXPWRBN:00
E: DRIVER=button
E: MODALIAS=acpi:LNXPWRBN:
E: SUBSYSTEM=acpi

P: /devices/LNXSYSTM:00/LNXPWRBN:00/input/input2
E: DEVPATH=/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2
E: EV=3
E: ID_FOR_SEAT=input-acpi-LNXPWRBN_00
E: ID_INPUT=1
E: ID_INPUT_KEY=1
E: ID_PATH=acpi-LNXPWRBN:00
E: ID_PATH_TAG=acpi-LNXPWRBN_00
E: KEY=10000000000000 0
E: MODALIAS=input:b0019v0000p0001e0000-e0,1,k74,ramlsfw
E: NAME="Power Button"
E: PHYS="LNXPWRBN/button/input0"
E: PRODUCT=19/0/1/0
E: PROP=0
E: SUBSYSTEM=input
E: TAGS=:seat:
E: USEC_INITIALIZED=2010022

P: /devices/LNXSYSTM:00/LNXPWRBN:00/input/input2/event2
N: input/event2
E: BACKSPACE=guess
E: DEVNAME=/dev/input/event2
E: DEVPATH=/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2/event2
E: ID_INPUT=1
E: ID_INPUT_KEY=1
E: ID_PATH=acpi-LNXPWRBN:00
E: ID_PATH_TAG=acpi-LNXPWRBN_00
E: MAJOR=13
E: MINOR=66
E: SUBSYSTEM=input
E: TAGS=:power-switch:
E: USEC_INITIALIZED=2076101
E: XKBLAYOUT=us
E: XKBMODEL=pc105

P: /devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0
E: DEVPATH=/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0
E: DEVTYPE=scsi_device
E: DRIVER=sd
E: MODALIAS=scsi:t-0x00
E: SUBSYSTEM=scsi
   070701000000E2000081B400000000000000000000000160C1E96E000000E7000000000000000000000000000000000000003900000000susemanager-sls/src/tests/data/virt_state-test.initcache  (dp1
S'domain_data'
p2
(dp3
I5
(dp4
S'name'
p5
S'testvm'
p6
sS'virt_type'
p7
S'para_virtualized'
p8
sS'state'
p9
S'running'
p10
sS'vcpus'
p11
I2
sS'memory_size'
p12
S'1024'
p13
sS'uuid'
p14
I5
sssS'expire_time'
p15
L2141506800L
s.
 070701000000E3000081B400000000000000000000000160C1E96E0000040E000000000000000000000000000000000000002500000000susemanager-sls/src/tests/mockery.py  import sys
import os
try:
    from cStringIO import StringIO
except ImportError:
    from io import StringIO
from mock import MagicMock


def setup_environment():
    '''
    Replace the "salt" package and its submodules in sys.modules with
    MagicMock stand-ins so modules under test can be imported without
    Salt installed. salt.exceptions gets a mock whose
    CommandExecutionError is the built-in Exception, so it can still be
    raised and caught.
    :return:
    '''
    already_mocked = 'salt' in sys.modules and isinstance(sys.modules['salt'], MagicMock)
    if not already_mocked:
        for module_name in ('salt', 'salt.config', 'salt.utils',
                            'salt.utils.versions', 'salt.utils.odict',
                            'salt.utils.minions', 'salt.modules',
                            'salt.modules.cmdmod', 'salt.states'):
            sys.modules[module_name] = MagicMock()
        sys.modules['salt.exceptions'] = MagicMock(CommandExecutionError=Exception)


def get_test_data(filename):
    '''
    Read a test fixture from the "data" directory under the current
    working directory (tests are expected to run from src/tests).

    :param filename: fixture file name inside ./data
    :return: file content as a string
    '''
    path = os.path.sep.join([os.path.abspath(''), 'data', filename])
    # Use a context manager so the handle is closed deterministically
    # instead of leaking until garbage collection.
    with open(path, 'r') as handle:
        return handle.read()
  070701000000E4000081B400000000000000000000000160C1E96E00000518000000000000000000000000000000000000003000000000susemanager-sls/src/tests/test_beacon_pkgset.py   '''
Author: Bo Maryniuk <bo@suse.de>
'''

from mock import MagicMock, patch
from ..beacons import pkgset

pkgset.__context__ = dict()


def test_virtual():
    '''
    __virtual__ should advertise the beacon name only when the
    cookie path exists on disk.
    '''
    for path_exists, should_match in ((True, True), (False, False)):
        with patch.object(pkgset.os.path, "exists", MagicMock(return_value=path_exists)):
            matches = pkgset.__virtual__() == pkgset.__virtualname__
            assert matches is should_match


def test_validate():
    '''
    validate() accepts a configuration carrying a cookie path and
    rejects configurations without one.
    '''
    status, message = pkgset.validate({'cookie': '/bogus/path'})
    assert status is True
    assert message == 'Configuration validated'

    for bad_config in ({}, {'bogus': 'data'}):
        status, message = pkgset.validate(bad_config)
        assert status is False
        assert message == 'Cookie path has not been set.'


@patch.object(pkgset.os.path, 'exists', MagicMock(return_value=True))
@patch.object(pkgset, '__context__', {pkgset.__virtualname__: ""})
def test_beacon():
    '''
    beacon() fires a 'changed' event when the cookie file content
    differs from the value cached in the beacon context.
    '''
    cookie_reader = MagicMock()
    cookie_reader.return_value.__enter__.return_value.read.return_value.strip.return_value = 'test'
    with patch.object(pkgset, 'open', cookie_reader):
        assert pkgset.beacon({}) == [{'tag': 'changed'}]
070701000000E5000081B400000000000000000000000160C1E96E00001837000000000000000000000000000000000000003400000000susemanager-sls/src/tests/test_beacon_virtpoller.py   '''
Author: Michael Calmer <mc@suse.com>
'''
import sys
import os
import shutil
from mock import MagicMock, patch
sys.modules['libvirt'] = MagicMock()
from ..beacons import virtpoller

virtpoller.__context__ = dict()

CACHE_FILE = '/tmp/virt_state-test.cache'

def test_virtual():
    '''
    The beacon should register itself under its virtual name.
    '''
    assert virtpoller.__virtualname__ == virtpoller.__virtual__()


def test_validate():
    '''
    A configuration providing cache_file and expire_time passes validation.
    '''
    status, message = virtpoller.validate({'cache_file': '/bogus/path',
                                           'expire_time': 2})
    assert status is True
    assert message == 'Configuration validated'


def test_beacon():
    '''
    Test beacon functionality.
    First run without cache file. All systems are "new" and should be reported.
    '''
    # Fake libvirt domain; info() returns the raw stats list the beacon reads.
    # Values match the assertions below: 2 vcpus, memory 1024, state 0 which
    # the beacon reports as "running" (presumably libvirt dominfo ordering —
    # confirm against virtpoller's parsing).
    domain = MagicMock()
    domain.info = MagicMock(name='info')
    domain.info.return_value = [0, 1024, 1024, 2, 30]
    domain.name = MagicMock(name='name')
    domain.name.return_value = 'testvm'

    # Connection exposing exactly one guest.
    conn = MagicMock()
    conn.listAllDomains = MagicMock(name='listAllDomains')
    conn.listAllDomains.return_value = [domain]

    # Cold start: remove any stale cache so every domain counts as new.
    if os.path.exists(CACHE_FILE):
        os.unlink(CACHE_FILE)

    # hexlify is mocked so the generated uuid is the predictable value 5.
    with patch.object(virtpoller, 'libvirt', MagicMock(return_value=True)):
        with patch.object(virtpoller.libvirt, 'openReadOnly', MagicMock(return_value=conn)):
            with patch.object(virtpoller.binascii, 'hexlify', MagicMock(return_value=5)):
                ret = virtpoller.beacon({'cache_file': CACHE_FILE,
                                         'expire_time': 2})
    # The new guest must be reported with an 'exists' event and the
    # properties gathered from the mocked connection.
    assert isinstance(ret, list)
    assert isinstance(ret[0], dict)
    assert sorted(ret[0].keys()) == ['plan']
    assert ret[0]['plan'][0]['event_type'] == 'exists'
    assert 'guest_properties' in ret[0]['plan'][0]
    data = ret[0]['plan'][0]['guest_properties']
    assert data['name'] == 'testvm'
    assert data['virt_type'] == 'para_virtualized'
    assert data['state'] == 'running'
    assert data['vcpus'] == 2
    assert data['memory_size'] == '1024'
    assert data['uuid'] == 5

def test_beacon_update():
    '''
    Test beacon functionality. Second run with cache file.
    Nothing has changed so the return value of the function
    should be an empty list.
    '''
    # Same guest data as the canned cache below, so no change is detected.
    domain = MagicMock()
    domain.info = MagicMock(name='info')
    domain.info.return_value = [0, 1024, 1024, 2, 30]
    domain.name = MagicMock(name='name')
    domain.name.return_value = 'testvm'

    conn = MagicMock()
    conn.listAllDomains = MagicMock(name='listAllDomains')
    conn.listAllDomains.return_value = [domain]

    # Seed the cache from the canned pickle so the beacon sees prior state.
    if os.path.exists(CACHE_FILE):
        os.unlink(CACHE_FILE)
    shutil.copyfile(os.path.sep.join([os.path.abspath(''), 'data', 'virt_state-test.initcache']), CACHE_FILE)

    with patch.object(virtpoller, 'libvirt', MagicMock(return_value=True)):
        with patch.object(virtpoller.libvirt, 'openReadOnly', MagicMock(return_value=conn)):
            with patch.object(virtpoller.binascii, 'hexlify', MagicMock(return_value=5)):
                ret = virtpoller.beacon({'cache_file': CACHE_FILE,
                                         'expire_time': 2})
    assert isinstance(ret, list)
    print("%s" % ret)
    assert len(ret) == 0

def test_beacon_change():
    '''
    Test beacon functionality. Another run with cache file.
    There are changes so it should report the new values.
    '''
    # Guest differs from the cached state: state 4 (reported as "stopped")
    # and memory grown to 2048.
    domain = MagicMock()
    domain.info = MagicMock(name='info')
    domain.info.return_value = [4, 1024, 2048, 2, 30]
    domain.name = MagicMock(name='name')
    domain.name.return_value = 'testvm'

    conn = MagicMock()
    conn.listAllDomains = MagicMock(name='listAllDomains')
    conn.listAllDomains.return_value = [domain]

    # Seed the cache from the canned pickle so the beacon has prior state
    # to compare against.
    if os.path.exists(CACHE_FILE):
        os.unlink(CACHE_FILE)
    shutil.copyfile(os.path.sep.join([os.path.abspath(''), 'data', 'virt_state-test.initcache']), CACHE_FILE)

    with patch.object(virtpoller, 'libvirt', MagicMock(return_value=True)):
        with patch.object(virtpoller.libvirt, 'openReadOnly', MagicMock(return_value=conn)):
            with patch.object(virtpoller.binascii, 'hexlify', MagicMock(return_value=5)):
                ret = virtpoller.beacon({'cache_file': CACHE_FILE,
                                         'expire_time': 2})
    # The changed guest is re-reported ('exists') with the updated values.
    assert isinstance(ret, list)
    assert isinstance(ret[0], dict)
    assert sorted(ret[0].keys()) == ['plan']
    assert ret[0]['plan'][0]['event_type'] == 'exists'
    assert 'guest_properties' in ret[0]['plan'][0]
    data = ret[0]['plan'][0]['guest_properties']
    assert data['name'] == 'testvm'
    assert data['virt_type'] == 'para_virtualized'
    assert data['state'] == 'stopped'
    assert data['vcpus'] == 2
    assert data['memory_size'] == '2048'
    assert data['uuid'] == 5

def test_beacon_remove():
    '''
    Test beacon functionality. Another run with cache file.
    The former host is not available anymore. Report should
    say "removed"
    '''

    # Connection now lists no domains at all; the cached guest is gone.
    conn = MagicMock()
    conn.listAllDomains = MagicMock(name='listAllDomains')
    conn.listAllDomains.return_value = []

    # Seed the cache from the canned pickle so the beacon remembers the
    # previously seen guest (the values asserted below come from it).
    if os.path.exists(CACHE_FILE):
        os.unlink(CACHE_FILE)
    shutil.copyfile(os.path.sep.join([os.path.abspath(''), 'data', 'virt_state-test.initcache']), CACHE_FILE)

    with patch.object(virtpoller, 'libvirt', MagicMock(return_value=True)):
        with patch.object(virtpoller.libvirt, 'openReadOnly', MagicMock(return_value=conn)):
            with patch.object(virtpoller.binascii, 'hexlify', MagicMock(return_value=5)):
                ret = virtpoller.beacon({'cache_file': CACHE_FILE,
                                         'expire_time': 2})
    # The vanished guest is reported with a 'removed' event carrying the
    # last known properties from the cache.
    assert isinstance(ret, list)
    assert isinstance(ret[0], dict)
    assert sorted(ret[0].keys()) == ['plan']
    assert ret[0]['plan'][0]['event_type'] == 'removed'
    assert 'guest_properties' in ret[0]['plan'][0]
    data = ret[0]['plan'][0]['guest_properties']
    assert data['name'] == 'testvm'
    assert data['virt_type'] == 'para_virtualized'
    assert data['state'] == 'running'
    assert data['vcpus'] == 2
    assert data['memory_size'] == '1024'
    assert data['uuid'] == 5

 070701000000E6000081B400000000000000000000000160C1E96E00000BA4000000000000000000000000000000000000003100000000susemanager-sls/src/tests/test_grains_cpuinfo.py  '''
Author: bo@suse.de
'''

from mock import MagicMock, patch, mock_open
from . import mockery
mockery.setup_environment()

from ..grains import cpuinfo


def test_total_num_cpus():
    '''
    total_num_cpus() should count only the cpuNN entries among the
    sysfs directory listing.

    :return:
    '''
    sysfs_entries = ['cpu0', 'cpu1', 'cpu2', 'cpu3', 'cpufreq', 'cpuidle', 'power', 'modalias',
                     'kernel_max', 'possible', 'online', 'offline', 'isolated', 'uevent',
                     'intel_pstate', 'microcode', 'present']

    with patch('os.path.exists', MagicMock(return_value=True)):
        with patch('os.listdir', MagicMock(return_value=sysfs_entries)):
            result = cpuinfo.total_num_cpus()
            assert isinstance(result, dict)
            assert 'total_num_cpus' in result
            assert result['total_num_cpus'] == 4


def test_cpusockets_dmidecode():
    '''
    _dmidecode() should derive one CPU socket from the canned
    dmidecode sample output.

    :return:
    '''
    cpuinfo.log = MagicMock()
    dmi_output = mockery.get_test_data('dmidecode.sample')
    run_all = MagicMock(return_value={'retcode': 0, 'stdout': dmi_output})
    with patch('src.modules.udevdb._which_bin', MagicMock(return_value="/bogus/path")):
        with patch.dict(cpuinfo.__salt__, {'cmd.run_all': run_all}):
            result = cpuinfo._dmidecode([])
            assert isinstance(result, dict)
            assert 'cpusockets' in result
            assert result['cpusockets'] == 1


def test_cpusockets_parse_cpuinfo():
    '''
    _parse_cpuinfo() yields nothing for non-Intel /proc/cpuinfo layouts
    and one socket for the x86 sample.

    :return:
    '''
    cpuinfo.log = MagicMock()

    def parse(sample_name):
        # Run _parse_cpuinfo against a canned /proc/cpuinfo sample.
        with patch('os.access', MagicMock(return_value=True)):
            with patch.object(cpuinfo, 'open',
                              mock_open(read_data=mockery.get_test_data(sample_name)),
                              create=True):
                return cpuinfo._parse_cpuinfo([])

    # The cpuinfo parser is not applicable for non-Intel architectures.
    assert parse('cpuinfo.s390.sample') is None
    assert parse('cpuinfo.ppc64le.sample') is None

    result = parse('cpuinfo.sample')
    assert isinstance(result, dict)
    assert 'cpusockets' in result
    assert result['cpusockets'] == 1


def test_cpusockets_lscpu():
    '''
    _lscpu() reports one CPU socket for each architecture's sample output.

    :return:
    '''
    cpuinfo.log = MagicMock()
    for sample_name in ('lscpu.ppc64le.sample', 'lscpu.s390.sample', 'lscpu.sample'):
        run_all = MagicMock(return_value={'retcode': 0,
                                          'stdout': mockery.get_test_data(sample_name)})
        with patch('src.modules.udevdb._which_bin', MagicMock(return_value="/bogus/path")):
            with patch.dict(cpuinfo.__salt__, {'cmd.run_all': run_all}):
                result = cpuinfo._lscpu([])
                assert isinstance(result, dict)
                assert 'cpusockets' in result
                assert result['cpusockets'] == 1

070701000000E7000081B400000000000000000000000160C1E96E000005E3000000000000000000000000000000000000003200000000susemanager-sls/src/tests/test_mgr_master_tops.py # -*- coding: utf-8 -*-
'''
:codeauthor:    Pablo Suárez Hernández <psuarezhernandez@suse.de>
'''

from mock import MagicMock, patch
from . import mockery
mockery.setup_environment()

import sys
sys.path.append("../../modules/tops")

import mgr_master_tops

# Expected static top data for the "base" environment; the tests below
# compare mgr_master_tops.top() output against this structure.
TEST_MANAGER_STATIC_TOP = {
    "base": [
        "channels",
        "certs",
        "packages",
        "custom",
        "custom_groups",
        "custom_org",
        "formulas",
        "services.salt-minion",
        "services.docker",
        "services.kiwi-image-server"
    ]
}


def test_virtual():
    '''
    __virtual__() advertises the module under its own name.
    '''
    assert "mgr_master_tops" == mgr_master_tops.__virtual__()


def test_top_default_saltenv():
    '''
    When no environment is configured, top() falls back to the static
    SUSE Manager top state for the base environment.
    '''
    result = mgr_master_tops.top(opts={'environment': None})
    assert result == TEST_MANAGER_STATIC_TOP


def test_top_base_saltenv():
    '''
    An explicit "base" environment returns the static SUSE Manager
    top state as well.
    '''
    result = mgr_master_tops.top(opts={'environment': 'base'})
    assert result == TEST_MANAGER_STATIC_TOP


def test_top_unknown_saltenv():
    '''
    Test if top function is returning None for unknown salt environments.
    '''
    kwargs = {'opts': {'environment': 'otherenv'}}
    # Identity check with "is" is the idiomatic (PEP 8) way to test for
    # None and avoids __eq__ surprises from mocked objects.
    assert mgr_master_tops.top(**kwargs) is None
 070701000000E8000081B400000000000000000000000160C1E96E000004A1000000000000000000000000000000000000003A00000000susemanager-sls/src/tests/test_module_mainframesysinfo.py '''
Author: Bo Maryniuk <bo@suse.de>
'''

import pytest
from mock import MagicMock, patch
from . import mockery
mockery.setup_environment()

from ..modules import mainframesysinfo


def test_virtual():
    '''
    __virtual__() mirrors the os.access() result: True when the
    sysinfo source is readable, False otherwise.

    :return:
    '''
    for accessible in (True, False):
        with patch('os.access', MagicMock(return_value=accessible)):
            assert mainframesysinfo.__virtual__() is accessible


def test_read_values():
    '''
    read_values() returns the command stdout on success and raises an
    exception carrying stderr when the command fails.

    :return:
    '''
    result = {'stdout': 'bogus data', 'retcode': 0, 'stderr': ''}
    with patch.dict(mainframesysinfo.__salt__,
                    {'cmd.run_all': MagicMock(return_value=result)}):
        assert mainframesysinfo.read_values() == 'bogus data'

    result = {'stdout': 'bogus data', 'retcode': 1, 'stderr': 'error here'}
    with patch.dict(mainframesysinfo.__salt__,
                    {'cmd.run_all': MagicMock(return_value=result)}):
        with pytest.raises(Exception) as exc_info:
            mainframesysinfo.read_values()
        assert str(exc_info.value) == 'error here'
   070701000000E9000081B400000000000000000000000160C1E96E00000608000000000000000000000000000000000000003200000000susemanager-sls/src/tests/test_module_sumautil.py '''
Author: mc@suse.com
'''

from mock import MagicMock, patch
from . import mockery
mockery.setup_environment()

from ..modules import sumautil


def test_livepatching_kernelliveversion():
    '''
    Test kernel_live_version.

    get_kernel_live_version() should report the name of the active
    kgraft patch, and return None when the helper binary is unavailable.

    :return:
    '''
    sumautil.log = MagicMock()
    with patch('src.modules.udevdb._which_bin', MagicMock(return_value="/bogus/path")):
        # Two consecutive cmd.run_all results: a readiness probe ('ready')
        # followed by the patch listing sample.
        # (Removed stray trailing semicolons from the original.)
        mock = MagicMock(side_effect=[{'retcode': 0, 'stdout': 'ready'},
                                      {'retcode': 0, 'stdout': mockery.get_test_data('livepatching-1.sample')}])
        with patch.dict(sumautil.__salt__, {'cmd.run_all': mock}):
            out = sumautil.get_kernel_live_version()
            assert type(out) == dict
            assert 'mgr_kernel_live_version' in out
            assert out['mgr_kernel_live_version'] == 'kgraft_patch_1_2_2'

        mock = MagicMock(side_effect=[{'retcode': 0, 'stdout': 'ready'},
                                      {'retcode': 0, 'stdout': mockery.get_test_data('livepatching-2.sample')}])
        with patch.dict(sumautil.__salt__, {'cmd.run_all': mock}):
            out = sumautil.get_kernel_live_version()
            assert type(out) == dict
            assert 'mgr_kernel_live_version' in out
            assert out['mgr_kernel_live_version'] == 'kgraft_patch_2_2_1'

    # Without the binary the helper returns nothing.
    with patch('src.modules.udevdb._which_bin', MagicMock(return_value=None)):
        out = sumautil.get_kernel_live_version()
        assert out is None
070701000000EA000081B400000000000000000000000160C1E96E00000F03000000000000000000000000000000000000003000000000susemanager-sls/src/tests/test_module_udevdb.py   '''
Author: Bo Maryniuk <bo@suse.de>
'''

from mock import MagicMock, patch
from . import mockery
mockery.setup_environment()

from ..modules import udevdb


def test_virtual():
    '''
    __virtual__ must report availability depending on whether the
    'udevadm' binary can be located in the environment.

    :return:
    '''
    for found_path, expected in ((None, False), ("/bogus/path", True)):
        which_mock = MagicMock(return_value=found_path)
        with patch('src.modules.udevdb._which_bin', which_mock):
            assert udevdb.__virtual__() is expected


def test_normalize():
    '''
    udevdb.normalize must unwrap nested lists that contain a single
    item, leaving multi-item lists and scalar values untouched.

    :return:
    '''
    sample = {'key': ['value', 'here'], 'foo': ['bar'], 'some': 'data'}
    expected = {'foo': 'bar', 'some': 'data', 'key': ['value', 'here']}
    assert udevdb.normalize(sample) == expected


def test_exportdb():
    '''
    Test udevdb.exportdb method.

    Feeds a captured `udevadm` export sample through exportdb and checks
    each parsed section against the expected structure below.

    :return:
    '''
    # Raw udev database dump used as the command output fixture.
    udev_data = mockery.get_test_data('udev.sample')
    # Expected parsed sections: 'P' = device path, 'E' = properties,
    # 'N' = device node (optional), 'X-Mgr' = extra data added by the module.
    out = [{'P': '/devices/LNXSYSTM:00/LNXPWRBN:00',
            'E': {'MODALIAS': 'acpi:LNXPWRBN:',
                  'SUBSYSTEM': 'acpi',
                  'DRIVER': 'button',
                  'DEVPATH': '/devices/LNXSYSTM:00/LNXPWRBN:00'}},
           {'P': '/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2',
            'E': {'SUBSYSTEM': 'input',
                  'PRODUCT': '19/0/1/0',
                  'PHYS': '"LNXPWRBN/button/input0"',
                  'NAME': '"Power Button"',
                  'ID_INPUT': 1,
                  'DEVPATH': '/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2',
                  'MODALIAS': 'input:b0019v0000p0001e0000-e0,1,k74,ramlsfw',
                  'ID_PATH_TAG': 'acpi-LNXPWRBN_00',
                  'TAGS': ':seat:',
                  'PROP': 0,
                  'ID_FOR_SEAT': 'input-acpi-LNXPWRBN_00',
                  'KEY': '10000000000000 0',
                  'USEC_INITIALIZED': 2010022,
                  'ID_PATH': 'acpi-LNXPWRBN:00',
                  'EV': 3,
                  'ID_INPUT_KEY': 1}},
           {'P': '/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2/event2',
            'E': {'SUBSYSTEM': 'input',
                  'XKBLAYOUT': 'us',
                  'MAJOR': 13,
                  'ID_INPUT': 1,
                  'DEVPATH': '/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2/event2',
                  'ID_PATH_TAG': 'acpi-LNXPWRBN_00',
                  'DEVNAME': '/dev/input/event2',
                  'TAGS': ':power-switch:',
                  'BACKSPACE': 'guess',
                  'MINOR': 66,
                  'USEC_INITIALIZED': 2076101,
                  'ID_PATH': 'acpi-LNXPWRBN:00',
                  'XKBMODEL': 'pc105',
                  'ID_INPUT_KEY': 1},
            'N': 'input/event2'},
           {'P': '/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0',
            'E': {'MODALIAS': 'scsi:t-0x00',
                  'SUBSYSTEM': 'scsi',
                  'DEVTYPE': 'scsi_device',
                  'DRIVER': 'sd',
                  'DEVPATH': '/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0'
                  },
            'X-Mgr': {'SCSI_SYS_TYPE': '0'}},
           ]

    # Two cmd.run_all results: the udev dump itself, then '0' — presumably
    # the SCSI type query feeding X-Mgr['SCSI_SYS_TYPE']; verify in udevdb.
    with patch.dict(udevdb.__salt__, {'cmd.run_all': MagicMock(side_effect=[{'retcode': 0, 'stdout': udev_data},
                                                                            {'retcode': 0, 'stdout': '0'}])}):
        data = udevdb.exportdb()
        # No falsy/empty sections may be emitted.
        assert data == [_f for _f in data if _f]

        # Compare each section field-by-field against the expected output.
        for d_idx, d_section in enumerate(data):
            assert out[d_idx]['P'] == d_section['P']
            assert out[d_idx].get('N') == d_section.get('N')
            assert out[d_idx].get('X-Mgr') == d_section.get('X-Mgr')
            for key, value in list(d_section['E'].items()):
                assert out[d_idx]['E'][key] == value
 070701000000EB000081B400000000000000000000000160C1E96E00001ADD000000000000000000000000000000000000003600000000susemanager-sls/src/tests/test_module_uyuni_config.py '''
Author: Ricardo Mateus <rmateus@suse.com>
'''

import pytest
from mock import MagicMock, patch, call
from . import mockery

mockery.setup_environment()

import sys

from ..modules import uyuni_config
from ..modules.uyuni_config import RPCClient, UyuniChannelsException, UyuniUsersException

class TestRPCClient:
    """
    Test RPCClient object (the XML-RPC connection wrapper).
    """
    rpc_client = None

    @patch("src.modules.uyuni_config.ssl", MagicMock())
    @patch("src.modules.uyuni_config.xmlrpc", MagicMock())
    def setup_method(self, method):
        """
        Setup state per test.

        :param method: test method being prepared (pytest hook argument)
        :return:
        """
        self.rpc_client = RPCClient(user="user", password="password", url="https://somewhere")
        # Replace the connection with a mock FIRST, then stub auth.login on
        # it. (Previously login was stubbed on the real connection and the
        # stub was discarded when conn was replaced on the next line.)
        self.rpc_client.conn = MagicMock()
        self.rpc_client.conn.auth.login = MagicMock(return_value="My_token")

    def teardown_method(self, method):
        """
        Tear-down state per test.

        :param method: test method being torn down (pytest hook argument)
        :return:
        """
        self.rpc_client = None
        uyuni_config.__pillar__ = {}

    def test_init_called(self):
        """
        Init method called: user is stored, no token yet.

        :return:
        """
        assert self.rpc_client.get_user() == 'user'
        assert self.rpc_client.token is None

    def test_init_called_without_pillar(self):
        """
        Init method called without user password and without any pillar data
        must refuse to construct the client.

        :return:
        """
        with pytest.raises(UyuniUsersException):
            RPCClient(user="user")

    def test_init_called_with_pillar(self):
        """
        Init method called without user password and with pillar data defined:
        pillar credentials take precedence over the passed-in user.

        :return:
        """
        uyuni_config.__pillar__ = {
            "uyuni": {
                "xmlrpc": {
                    "user": "admin_user",
                    "password": "password_user"
                }
            }
        }

        rpc_client = RPCClient(user="user")
        assert rpc_client.get_user() == 'admin_user'
        assert rpc_client._user == 'admin_user'
        assert rpc_client._password == 'password_user'
        assert rpc_client.token is None

    def test_get_token(self):
        """
        Test get_token method with reuse token.

        :return:
        """
        my_mock1 = MagicMock(return_value="My_Special_Token")
        my_mock2 = MagicMock(return_value="My_Special_Token_2")
        self.rpc_client.conn.auth.login = my_mock1
        token = self.rpc_client.get_token()

        # First call logs in and caches the token in __context__.
        assert my_mock1.call_count == 1
        assert token == "My_Special_Token"
        assert uyuni_config.__context__.get("uyuni.auth_token_user") == "My_Special_Token"

        # Cached token is reused: no additional login call.
        self.rpc_client.get_token()
        assert my_mock1.call_count == 1

        self.rpc_client.conn.auth.login = my_mock2
        self.rpc_client.get_token()
        assert my_mock1.call_count == 1
        assert my_mock2.call_count == 0

        # Forcing a refresh re-authenticates and replaces the cached token.
        token = self.rpc_client.get_token(True)
        assert my_mock1.call_count == 1
        assert my_mock2.call_count == 1
        assert token == "My_Special_Token_2"
        assert uyuni_config.__context__.get("uyuni.auth_token_user") == "My_Special_Token_2"

    def test_call_rpc(self):
        """
        Call any XML-RPC method; the token is prepended to the arguments.

        :return:
        """
        self.rpc_client.token = "My_token"
        out = self.rpc_client("uyuni.some_method")
        mo = getattr(self.rpc_client.conn, "uyuni.some_method")
        assert out is not None
        assert mo.called
        mo.assert_called_with("My_token")

        out2 = self.rpc_client("uyuni.some_method_2", "my_arg")
        mo2 = getattr(self.rpc_client.conn, "uyuni.some_method_2")
        assert out2 is not None
        assert mo2.called
        mo2.assert_called_with("My_token", "my_arg")

    def test_call_rpc_crash_handle_generic(self):
        """
        Handle XML-RPC method crash with generic error.

        :return:
        """
        self.rpc_client.token = "the_token"
        exc = Exception("generic error when processing")
        # 2951 is a non-authentication fault: no re-login/retry expected.
        exc.faultCode = 2951
        setattr(self.rpc_client.conn, "uyuni.some_method",
                MagicMock(side_effect=exc))

        with patch("src.modules.uyuni_config.log") as logger:
            with pytest.raises(Exception):
                self.rpc_client("uyuni.some_method")
            mo = getattr(self.rpc_client.conn, "uyuni.some_method")
            assert mo.called
            mo.assert_called_with("the_token")
            assert logger.error.call_args[0] == ('Unable to call RPC function: %s', 'generic error when processing')

    def test_call_rpc_crash_handle_reauthenticate_error(self):
        """
        Handle XML-RPC method crash with reauthenticate error:
        the auth fault triggers one re-login and a single retry,
        which fails again here.

        :return:
        """
        self.rpc_client.token = "the_token"
        self.rpc_client.conn.auth.login = MagicMock(return_value="the_token_new")

        exc = Exception("generic error when processing")
        # 2950 signals an authentication fault.
        exc.faultCode = 2950
        setattr(self.rpc_client.conn, "uyuni.some_method",
                MagicMock(side_effect=exc))

        with patch("src.modules.uyuni_config.log") as logger:
            with pytest.raises(Exception):
                self.rpc_client("uyuni.some_method")
            mo = getattr(self.rpc_client.conn, "uyuni.some_method")
            assert mo.call_count == 2
            mo.assert_has_calls([call("the_token"), call("the_token_new")])
            self.rpc_client.conn.auth.login.assert_called_once_with("user", "password")
            assert self.rpc_client.get_token() == "the_token_new"
            assert logger.error.call_args[0] == ('Unable to call RPC function: %s', 'generic error when processing')

    def test_call_rpc_handle_reauthenticate(self):
        """
        Handle XML-RPC method and reauthenticate: the first call fails
        with an auth fault, the retried call succeeds.

        :return:
        """
        self.rpc_client.token = "the_token"
        self.rpc_client.conn.auth.login = MagicMock(return_value="the_token_new")

        exc = Exception("generic error when processing")
        exc.faultCode = 2950

        setattr(self.rpc_client.conn, "uyuni.some_method",
                MagicMock(side_effect=[exc, "return string"]))

        assert self.rpc_client.get_token() == "the_token"
        with patch("src.modules.uyuni_config.log") as logger:
            out = self.rpc_client("uyuni.some_method")
            mo = getattr(self.rpc_client.conn, "uyuni.some_method")
            assert out is not None
            assert out == 'return string'
            assert mo.call_count == 2
            mo.assert_has_calls([call("the_token"), call("the_token_new")])
            self.rpc_client.conn.auth.login.assert_called_once_with("user", "password")
            assert self.rpc_client.get_token() == "the_token_new"
            assert logger.warning.call_args[0] == ('Fall back to the second try due to %s', 'generic error when processing')

   070701000000EC000081B400000000000000000000000160C1E96E00001269000000000000000000000000000000000000003200000000susemanager-sls/src/tests/test_state_mgrcompat.py #-*- coding: utf-8 -*-
'''
Test custom wrapper for "module.run" state module.

Author: Pablo Suárez Herńandez <psuarezhernandez@suse.com>
'''

from mock import MagicMock, patch
from . import mockery
mockery.setup_environment()

from ..states import mgrcompat

# Expected argument shapes for module.run: the "tailored" form is what the
# new-style (use_superseded) state expects; the MGRCOMPAT form is the legacy
# flat-kwargs input given to mgrcompat.module_run in every test below.
TAILORED_MODULE_RUN_KWARGS = {'service.running': [{'text': 'superseded', 'name': 'salt-minion'}, {"foo": "bar"}]}
MGRCOMPAT_MODULE_RUN_KWARGS = {'name': 'service.running', 'text': 'superseded', 'm_name': 'salt-minion', 'kwargs': {'foo': 'bar'}}

# Replace the Salt-injected globals so the state module can be exercised
# without a running minion.
mgrcompat.log = MagicMock()
mgrcompat.OrderedDict = dict
mgrcompat.__opts__ = {}
mgrcompat.__grains__ = {}


def test_module_run_on_phosphorous():
    '''On Salt 3003 the kwargs are tailored to the new-style module.run.'''
    run_mock = MagicMock(return_value={'changes': {'service.running': 'foobar'}})
    mgrcompat.module.run = run_mock
    grains = {'saltversioninfo': [3003, None, None, None]}
    with patch.dict(mgrcompat.__grains__, grains):
        mgrcompat.module_run(**MGRCOMPAT_MODULE_RUN_KWARGS)
        run_mock.assert_called_once_with(**TAILORED_MODULE_RUN_KWARGS)

def test_module_run_on_magnesium():
    '''On Salt 3002 without use_superseded the legacy kwargs pass through.'''
    run_mock = MagicMock(return_value={'changes': {'service.running': 'foobar'}})
    mgrcompat.module.run = run_mock
    grains = {'saltversioninfo': [3002, None, None, None]}
    with patch.dict(mgrcompat.__grains__, grains):
        mgrcompat.module_run(**MGRCOMPAT_MODULE_RUN_KWARGS)
        run_mock.assert_called_once_with(**MGRCOMPAT_MODULE_RUN_KWARGS)

def test_module_run_on_magnesium_use_superseded():
    '''On Salt 3002 with use_superseded enabled the kwargs are tailored.'''
    run_mock = MagicMock(return_value={'changes': {'service.running': 'foobar'}})
    mgrcompat.module.run = run_mock
    grains = {'saltversioninfo': [3002, None, None, None]}
    opts = {'use_superseded': ['module.run']}
    with patch.dict(mgrcompat.__grains__, grains), patch.dict(mgrcompat.__opts__, opts):
        mgrcompat.module_run(**MGRCOMPAT_MODULE_RUN_KWARGS)
        run_mock.assert_called_once_with(**TAILORED_MODULE_RUN_KWARGS)

def test_module_run_on_sodium():
    '''On Salt 3001 without use_superseded the legacy kwargs pass through.'''
    run_mock = MagicMock(return_value={'changes': {'service.running': 'foobar'}})
    mgrcompat.module.run = run_mock
    grains = {'saltversioninfo': [3001, None, None, None]}
    with patch.dict(mgrcompat.__grains__, grains):
        mgrcompat.module_run(**MGRCOMPAT_MODULE_RUN_KWARGS)
        run_mock.assert_called_once_with(**MGRCOMPAT_MODULE_RUN_KWARGS)

def test_module_run_on_sodium_use_superseded():
    '''On Salt 3001 with use_superseded enabled the kwargs are tailored.'''
    run_mock = MagicMock(return_value={'changes': {'service.running': 'foobar'}})
    mgrcompat.module.run = run_mock
    grains = {'saltversioninfo': [3001, None, None, None]}
    opts = {'use_superseded': ['module.run']}
    with patch.dict(mgrcompat.__grains__, grains), patch.dict(mgrcompat.__opts__, opts):
        mgrcompat.module_run(**MGRCOMPAT_MODULE_RUN_KWARGS)
        run_mock.assert_called_once_with(**TAILORED_MODULE_RUN_KWARGS)

def test_module_run_on_neon():
    '''On Salt 3000 without use_superseded the legacy kwargs pass through.'''
    run_mock = MagicMock(return_value={'changes': {'service.running': 'foobar'}})
    mgrcompat.module.run = run_mock
    grains = {'saltversioninfo': [3000, None, None, None]}
    with patch.dict(mgrcompat.__grains__, grains):
        mgrcompat.module_run(**MGRCOMPAT_MODULE_RUN_KWARGS)
        run_mock.assert_called_once_with(**MGRCOMPAT_MODULE_RUN_KWARGS)

def test_module_run_on_neon_use_superseded():
    '''On Salt 3000 with use_superseded enabled the kwargs are tailored.'''
    run_mock = MagicMock(return_value={'changes': {'service.running': 'foobar'}})
    mgrcompat.module.run = run_mock
    grains = {'saltversioninfo': [3000, None, None, None]}
    opts = {'use_superseded': ['module.run']}
    with patch.dict(mgrcompat.__grains__, grains), patch.dict(mgrcompat.__opts__, opts):
        mgrcompat.module_run(**MGRCOMPAT_MODULE_RUN_KWARGS)
        run_mock.assert_called_once_with(**TAILORED_MODULE_RUN_KWARGS)

def test_module_run_on_2019_2_0_use_superseded():
    '''On Salt 2019.2.0 with use_superseded enabled the kwargs are tailored.'''
    run_mock = MagicMock(return_value={'changes': {'service.running': 'foobar'}})
    mgrcompat.module.run = run_mock
    grains = {'saltversioninfo': [2019, 2, 0, 0]}
    opts = {'use_superseded': ['module.run']}
    with patch.dict(mgrcompat.__grains__, grains), patch.dict(mgrcompat.__opts__, opts):
        mgrcompat.module_run(**MGRCOMPAT_MODULE_RUN_KWARGS)
        run_mock.assert_called_once_with(**TAILORED_MODULE_RUN_KWARGS)

def test_module_run_on_2019_2_0_without_use_superseded():
    '''On Salt 2019.2.0 without use_superseded the legacy kwargs pass through.'''
    run_mock = MagicMock(return_value={'changes': {'service.running': 'foobar'}})
    mgrcompat.module.run = run_mock
    grains = {'saltversioninfo': [2019, 2, 0, 0]}
    with patch.dict(mgrcompat.__grains__, grains), patch.dict(mgrcompat.__opts__, {}):
        mgrcompat.module_run(**MGRCOMPAT_MODULE_RUN_KWARGS)
        run_mock.assert_called_once_with(**MGRCOMPAT_MODULE_RUN_KWARGS)

def test_module_run_on_2016_11_4():
    '''On Salt 2016.11.4 the legacy kwargs pass through unchanged.'''
    mock = MagicMock(return_value={'changes': {'service.running': 'foobar'}})
    mgrcompat.module.run = mock
    # Indentation normalized to 4-space steps (the original body used a
    # 7-space indent inside the `with`, inconsistent with the rest of the file).
    with patch.dict(mgrcompat.__grains__, {'saltversioninfo': [2016, 11, 4, 0]}):
        mgrcompat.module_run(**MGRCOMPAT_MODULE_RUN_KWARGS)
        mock.assert_called_once_with(**MGRCOMPAT_MODULE_RUN_KWARGS)
   070701000000ED000081B400000000000000000000000160C1E96E000010C4000000000000000000000000000000000000003000000000susemanager-sls/src/tests/test_state_product.py   '''
Author: cbbayburt@suse.com
'''

import sys
from mock import MagicMock, patch, call
from . import mockery
mockery.setup_environment()

from ..states import product

# Mock globals
# Replace the Salt-injected globals so the state module can be imported
# and exercised without a running minion; tests patch these per case.
product.log = MagicMock()
product.__salt__ = {}
product.__grains__ = {}

@patch.dict(product.__grains__, {'os_family': 'Suse'})
def test_suse_with_zypper():
    '''
    Test if the state module is available for SUSE OS only with a
    supported version of zypper (>= 1.8.13) available.
    '''
    # Supported zypper version
    with patch.dict(product.__salt__, {'pkg.info_installed': MagicMock(return_value={'zypper': {'version': '1.9.0'}})}):
        with patch.object(product, 'version_cmp', MagicMock(return_value=1)):
            # `==` instead of `is`: identity comparison against a str literal
            # is a CPython implementation detail (SyntaxWarning since 3.8).
            assert product.__virtual__() == 'product'
            product.version_cmp.assert_called_once_with('1.9.0', '1.8.13')

    # Unsupported zypper version
    with patch.dict(product.__salt__, {'pkg.info_installed': MagicMock(return_value={'zypper': {'version': '1.8.0'}})}):
        with patch.object(product, 'version_cmp', MagicMock(return_value=-1)):
            assert product.__virtual__() == (False, "Module product: zypper 1.8.13 or greater required")
            product.version_cmp.assert_called_once_with('1.8.0', '1.8.13')

    # No zypper available
    # NOTE(review): return_value (not side_effect) hands the exception *class*
    # back to the caller instead of raising it — confirm this matches how
    # product.__virtual__ detects a missing zypper.
    with patch.dict(product.__salt__, {'pkg.info_installed': MagicMock(return_value=sys.modules['salt.exceptions'].CommandExecutionError)}):
        assert product.__virtual__() == (False, "Module product: zypper package manager not found")


@patch.dict(product.__grains__, {'os_family': 'Non-Suse'})
def test_non_suse():
    '''
    The state module must be unavailable on non-SUSE operating systems.
    '''
    expected = (False, "Module product: non SUSE OS not supported")
    assert product.__virtual__() == expected


def test_get_missing_products():
    '''
    Missing products must be returned without the ones that are
    already provided by another installed product.
    '''
    # pkg.search results, consumed in order: the not-installed query first,
    # then one provides-query per candidate product.
    search_results = [
        {'product1': True, 'product2': True},
        {'product1': True, 'this-provides-product1': True},
        {'product2': True},
    ]
    search_mock = MagicMock(side_effect=search_results)

    with patch.dict(product.__salt__, {'pkg.search': search_mock}):
        missing = product._get_missing_products(False)

        # Expected pkg.search calls
        expected_calls = [
            call('product()', refresh=False, match='exact', provides=True, not_installed_only=True),
            call('product1', match='exact', provides=True),
            call('product2', match='exact', provides=True)
        ]
        search_mock.assert_has_calls(expected_calls)
        assert search_mock.call_count == 3
        # Only the product nothing else provides is reported as missing.
        assert missing == ['product2']


def test_not_installed_provides():
    '''
    Packages provided by another *missing* product must be excluded
    from the result, keeping only the providing product.
    '''
    # pkg.search results, consumed in order: not-installed query first,
    # then one provides-query per candidate product.
    search_results = [
        {'product1': True, 'this-provides-product1': True},
        {'product1': True, 'this-provides-product1': True},
        {'this-provides-product1': True},
    ]
    search_mock = MagicMock(side_effect=search_results)

    with patch.dict(product.__salt__, {'pkg.search': search_mock}):
        missing = product._get_missing_products(False)

        # Expected pkg.search calls
        expected_calls = [
            call('product()', refresh=False, match='exact', provides=True, not_installed_only=True),
            call('product1', match='exact', provides=True),
            call('this-provides-product1', match='exact', provides=True)
        ]
        search_mock.assert_has_calls(expected_calls)
        assert search_mock.call_count == 3

        # Not both products are returned ...
        assert len(missing) == 1
        # ... the provided one is excluded ...
        assert 'product1' not in missing
        # ... and the providing one is kept.
        assert 'this-provides-product1' in missing
070701000000EE000081B400000000000000000000000160C1E96E00017185000000000000000000000000000000000000003500000000susemanager-sls/src/tests/test_state_uyuni_config.py  import pytest
from mock import MagicMock, patch, call
from . import mockery
import pdb

mockery.setup_environment()

import sys

from ..states import uyuni_config

# Mock globals
# Replace the Salt-injected globals so the state module can run outside
# of a real minion; 'test': False means states apply changes (no dry run).
uyuni_config.log = MagicMock()
uyuni_config.__salt__ = {}
uyuni_config.__opts__ = {'test': False}


class TestManageUser:

    def test_user_present_new_user_test(self):
        exc = Exception("user not found")
        exc.faultCode = 2951

        with patch.dict(uyuni_config.__salt__, {'uyuni.user_get_details': MagicMock(side_effect=exc)}):
            with patch.dict(uyuni_config.__opts__, {'test': True}):
                result = uyuni_config.user_present('username', 'password', 'mail@mail.com',
                                                  'first_name', 'last_name', False,
                                                  ['role'], ['group'],
                                                  'org_admin_user', 'org_admin_password')
                assert result is not None
                assert result['name'] == 'username'
                assert result['result'] is None
                assert result['comment'] == 'username would be modified'

                assert result['changes'] == {
                    'login': {'new': 'username'},
                    'password': {'new': '(hidden)'},
                    'email': {'new': 'mail@mail.com'},
                    'first_name': {'new': 'first_name'},
                    'last_name': {'new': 'last_name'},
                    'roles': {'new': ['role']},
                    'system_groups': {'new': ['group']}}

                uyuni_config.__salt__['uyuni.user_get_details'].assert_called_once_with('username',
                                                                                        org_admin_user='org_admin_user',
                                                                                        org_admin_password='org_admin_password')

    def test_user_present_new_user_minimal(self):
        exc = Exception("user not found")
        exc.faultCode = 2951

        with patch.dict(uyuni_config.__salt__, {
            'uyuni.user_get_details': MagicMock(side_effect=exc),
            'uyuni.user_create': MagicMock(return_value=True)}):
            result = uyuni_config.user_present('username', 'password', 'mail@mail.com',
                                              'first_name', 'last_name')
            assert result is not None
            assert result['name'] == 'username'
            assert result['result'] is True
            assert result['comment'] == 'username user successfully modified'

            assert result['changes'] == {
                'login': {'new': 'username'},
                'password': {'new': '(hidden)'},
                'email': {'new': 'mail@mail.com'},
                'first_name': {'new': 'first_name'},
                'last_name': {'new': 'last_name'}}

            ## verify mock calls
            uyuni_config.__salt__['uyuni.user_get_details'].assert_called_once_with('username',
                                                                                    org_admin_user=None,
                                                                                    org_admin_password=None)

            uyuni_config.__salt__['uyuni.user_create'].assert_called_once_with(email='mail@mail.com',
                                                                               first_name='first_name',
                                                                               last_name='last_name',
                                                                               use_pam_auth=False,
                                                                               org_admin_password=None,
                                                                               org_admin_user=None,
                                                                               password='password',
                                                                               login='username')

    def test_user_present_new_user_complete(self):
        exc = Exception("user not found")
        exc.faultCode = 2951

        with patch.dict(uyuni_config.__salt__, {
            'uyuni.user_get_details': MagicMock(side_effect=exc),
            'uyuni.user_create': MagicMock(return_value=True),
            'uyuni.user_add_role': MagicMock(return_value=True),
            'uyuni.user_add_assigned_system_groups': MagicMock(return_value=1)}):
            result = uyuni_config.user_present('username', 'password', 'mail@mail.com',
                                              'first_name', 'last_name', False,
                                               ['role'], ['group'],
                                              'org_admin_user', 'org_admin_password')
            assert result is not None
            assert result['name'] == 'username'
            assert result['result'] is True
            assert result['comment'] == 'username user successfully modified'

            assert result['changes'] == {
                'login': {'new': 'username'},
                'password': {'new': '(hidden)'},
                'email': {'new': 'mail@mail.com'},
                'first_name': {'new': 'first_name'},
                'last_name': {'new': 'last_name'},
                'roles': {'new': ['role']},
                'system_groups': {'new': ['group']}}

            ## verify mock calls
            uyuni_config.__salt__['uyuni.user_get_details'].assert_called_once_with('username',
                                                                                    org_admin_user='org_admin_user',
                                                                                    org_admin_password='org_admin_password')

            uyuni_config.__salt__['uyuni.user_create'].assert_called_once_with(email='mail@mail.com',
                                                                               first_name='first_name',
                                                                               last_name='last_name',
                                                                               use_pam_auth=False,
                                                                               org_admin_password='org_admin_password',
                                                                               org_admin_user='org_admin_user',
                                                                               password='password',
                                                                               login='username')

            uyuni_config.__salt__['uyuni.user_add_role'].assert_called_once_with('username', role='role',
                                                                                 org_admin_user='org_admin_user',
                                                                                 org_admin_password='org_admin_password')

            uyuni_config.__salt__['uyuni.user_add_assigned_system_groups'].assert_called_once_with(login='username',
                                                                                                   server_group_names=[
                                                                                                      'group'],
                                                                                                   org_admin_user='org_admin_user',
                                                                                                   org_admin_password='org_admin_password')

    def test_user_present_update_user(self):
        exc = Exception("user not found")
        exc.faultCode = 2950

        current_user = {'uui': 'username',
                        'email': 'mail@mail.com',
                        'first_name': 'first',
                        'last_name': 'last'}

        with patch.dict(uyuni_config.__salt__, {
            'uyuni.user_get_details': MagicMock(side_effect=[current_user, exc]),
            'uyuni.user_list_roles': MagicMock(return_value=['role1', 'role2']),
            'uyuni.user_list_assigned_system_groups': MagicMock(return_value=[{'name': 'group1'}, {'name': 'group2'}]),
            'uyuni.user_set_details': MagicMock(return_value=True),
            'uyuni.user_remove_role': MagicMock(return_value=True),
            'uyuni.user_add_role': MagicMock(return_value=True),
            'uyuni.user_remove_assigned_system_groups': MagicMock(return_value=1),
            'uyuni.user_add_assigned_system_groups': MagicMock(return_value=1)}):
            result = uyuni_config.user_present('username', 'new_password', 'new_mail@mail.com',
                                              'new_first', 'new_last', False,
                                               ['role1', 'role3'], ['group2', 'group3'],
                                              'org_admin_user', 'org_admin_password')
            assert result is not None
            assert result['name'] == 'username'
            assert result['result'] is True
            assert result['comment'] == 'username user successfully modified'
            assert result['changes'] == {
                'password': {'new': '(hidden)', 'old': '(hidden)'},
                'email': {'new': 'new_mail@mail.com', 'old': 'mail@mail.com'},
                'first_name': {'new': 'new_first', 'old': 'first'},
                'last_name': {'new': 'new_last', 'old': 'last'},
                'roles': {'new': ['role1', 'role3'], 'old': ['role1', 'role2']},
                'system_groups': {'new': ['group2', 'group3'], 'old': ['group1', 'group2']}}

            ## verify mock calls
            uyuni_config.__salt__['uyuni.user_get_details'].assert_has_calls([call('username',
                                                                                   org_admin_user='org_admin_user',
                                                                                   org_admin_password='org_admin_password'),
                                                                              call('username', 'new_password')])

            uyuni_config.__salt__['uyuni.user_list_roles'].assert_called_once_with('username',
                                                                                   org_admin_user='org_admin_user',
                                                                                   org_admin_password='org_admin_password')

            uyuni_config.__salt__['uyuni.user_list_assigned_system_groups'].assert_called_once_with('username',
                                                                                                    org_admin_user='org_admin_user',
                                                                                                    org_admin_password='org_admin_password')

            uyuni_config.__salt__['uyuni.user_set_details'].assert_called_once_with(email='new_mail@mail.com',
                                                                                    first_name='new_first',
                                                                                    last_name='new_last',
                                                                                    org_admin_password='org_admin_password',
                                                                                    org_admin_user='org_admin_user',
                                                                                    password='new_password',
                                                                                    login='username')

            uyuni_config.__salt__['uyuni.user_remove_role'].assert_called_once_with('username', role='role2',
                                                                                    org_admin_user='org_admin_user',
                                                                                    org_admin_password='org_admin_password')
            uyuni_config.__salt__['uyuni.user_add_role'].assert_called_once_with('username', role='role3',
                                                                                 org_admin_user='org_admin_user',
                                                                                 org_admin_password='org_admin_password')

            uyuni_config.__salt__['uyuni.user_remove_assigned_system_groups'].assert_called_once_with(login='username',
                                                                                                      server_group_names=[
                                                                                                         'group1'],
                                                                                                      org_admin_user='org_admin_user',
                                                                                                      org_admin_password='org_admin_password')
            uyuni_config.__salt__['uyuni.user_add_assigned_system_groups'].assert_called_once_with(login='username',
                                                                                                   server_group_names=[
                                                                                                      'group3'],
                                                                                                   org_admin_user='org_admin_user',
                                                                                                   org_admin_password='org_admin_password')

    def test_user_absent_auth_error(self):
        """user_absent: bad org credentials (faultCode 2950) yield a failed state result."""
        auth_failure = Exception("Auth error")
        auth_failure.faultCode = 2950

        details_mock = MagicMock(side_effect=auth_failure)
        with patch.dict(uyuni_config.__salt__, {'uyuni.user_get_details': details_mock}):
            result = uyuni_config.user_absent('username',
                                             'org_admin_user', 'org_admin_password')
            assert result is not None
            assert result['name'] == 'username'
            assert result['result'] is False
            assert result['changes'] == {}
            assert result['comment'] == ("Error deleting user (organization credentials error)"
                                         " 'username': Auth error")

    def test_user_absent_user_not_exits(self):
        """user_absent: a missing user (faultCode -213) is treated as already absent."""
        missing_user = Exception("User not found")
        missing_user.faultCode = -213

        details_mock = MagicMock(side_effect=missing_user)
        with patch.dict(uyuni_config.__salt__, {'uyuni.user_get_details': details_mock}):
            result = uyuni_config.user_absent('username',
                                             'org_admin_user', 'org_admin_password')
            assert result is not None
            assert result['name'] == 'username'
            assert result['result'] is True
            assert result['changes'] == {}
            assert result['comment'] == "username is already absent"

    def test_user_absent_generic_error(self):
        """user_absent: unexpected fault codes are propagated to the caller unchanged."""
        failure = Exception("generic error")
        failure.faultCode = 2951

        details_mock = MagicMock(side_effect=failure)
        with patch.dict(uyuni_config.__salt__, {'uyuni.user_get_details': details_mock}):
            with pytest.raises(Exception) as exc_info:
                uyuni_config.user_absent('username',
                                        'org_admin_user', 'org_admin_password')
            assert exc_info.value.faultCode == 2951
            assert exc_info.value.args[0] == 'generic error'

    def test_user_absent_exists_test(self):
        """user_absent with test=True: deletion is only previewed; old values become changes."""
        # NOTE(review): 'uui' looks like a typo (login/uid?) — the reported changes
        # do not include it, so it is reproduced verbatim here.
        existing_user = {'uui': 'username',
                         'email': 'mail@mail.com',
                         'first_name': 'first',
                         'last_name': 'last'}

        details_mock = MagicMock(return_value=existing_user)
        with patch.dict(uyuni_config.__salt__, {'uyuni.user_get_details': details_mock}):
            with patch.dict(uyuni_config.__opts__, {'test': True}):
                result = uyuni_config.user_absent('username',
                                                 'org_admin_user', 'org_admin_password')

                assert result is not None
                assert result['name'] == 'username'
                assert result['result'] is None
                assert result['comment'] == 'username would be deleted'
                assert result['changes'] == {'login': {'old': 'username'},
                                             'email': {'old': 'mail@mail.com'},
                                             'first_name': {'old': 'first'},
                                             'last_name': {'old': 'last'}}

                details_mock.assert_called_once_with('username',
                                                     org_admin_user='org_admin_user',
                                                     org_admin_password='org_admin_password')

    def test_user_absent_exist_user(self):
        """user_absent: an existing user is deleted and old attributes reported as changes."""
        existing_user = {'uui': 'username',
                         'email': 'mail@mail.com',
                         'first_name': 'first',
                         'last_name': 'last'}

        details_mock = MagicMock(return_value=existing_user)
        delete_mock = MagicMock(return_value=True)
        with patch.dict(uyuni_config.__salt__, {'uyuni.user_get_details': details_mock,
                                                'uyuni.user_delete': delete_mock}):
            result = uyuni_config.user_absent('username',
                                             'org_admin_user', 'org_admin_password')

            assert result is not None
            assert result['name'] == 'username'
            assert result['result'] is True
            assert result['comment'] == 'User username has been deleted'
            assert result['changes'] == {'login': {'old': 'username'},
                                         'email': {'old': 'mail@mail.com'},
                                         'first_name': {'old': 'first'},
                                         'last_name': {'old': 'last'}}

            details_mock.assert_called_once_with('username',
                                                 org_admin_user='org_admin_user',
                                                 org_admin_password='org_admin_password')
            delete_mock.assert_called_once_with('username',
                                                org_admin_user='org_admin_user',
                                                org_admin_password='org_admin_password')


class TestManageUserChannels:
    """Tests for the uyuni_config.user_channels state.

    The state must refuse to manage channels for users holding elevated
    roles (their channel access is implicit) and otherwise reconcile the
    user's manageable/subscribable channel sets against the desired ones.
    """

    def test_user_channels_org_admin(self):
        """A user holding the org_admin role is rejected with no changes."""
        with patch.dict(uyuni_config.__salt__, {
            # FIX: this test exercises the org_admin role; the mock previously
            # returned ["channel_admin"] (copy/paste from the sibling test below),
            # so the org_admin path was never actually tested.
            'uyuni.user_list_roles': MagicMock(return_value=["org_admin"]),
            'uyuni.channel_list_manageable_channels': MagicMock(),
            'uyuni.channel_list_my_channels': MagicMock()}):
            result = uyuni_config.user_channels('username', 'password',
                                                org_admin_user='org_admin_user',
                                                org_admin_password='org_admin_password')
            assert result is not None
            assert result['name'] == 'username'
            assert not result['result']
            assert result['changes'] == {}
            assert 'org_admin' in result['comment']

            # Role lookup and channel listings use the user's own credentials.
            uyuni_config.__salt__['uyuni.user_list_roles'].assert_called_once_with('username', password='password')

            uyuni_config.__salt__['uyuni.channel_list_manageable_channels'].assert_called_once_with('username',
                                                                                                   'password')

            uyuni_config.__salt__['uyuni.channel_list_my_channels'].assert_called_once_with('username',
                                                                                           'password')

    def test_user_channels_channel_admin(self):
        """A user holding the channel_admin role is rejected with no changes."""
        with patch.dict(uyuni_config.__salt__, {
            'uyuni.user_list_roles': MagicMock(return_value=["channel_admin"]),
            'uyuni.channel_list_manageable_channels': MagicMock(),
            'uyuni.channel_list_my_channels': MagicMock()}):
            result = uyuni_config.user_channels('username', 'password',
                                                org_admin_user='org_admin_user',
                                                org_admin_password='org_admin_password')
            assert result is not None
            assert result['name'] == 'username'
            assert not result['result']
            assert result['changes'] == {}
            assert 'channel_admin' in result['comment']

            uyuni_config.__salt__['uyuni.user_list_roles'].assert_called_once_with('username', password='password')

            uyuni_config.__salt__['uyuni.channel_list_manageable_channels'].assert_called_once_with('username',
                                                                                                   'password')

            uyuni_config.__salt__['uyuni.channel_list_my_channels'].assert_called_once_with('username',
                                                                                           'password')

    def test_user_channels_add_all(self):
        """User has no channels yet: both desired channels are granted (flag True)."""
        with patch.dict(uyuni_config.__salt__, {
            'uyuni.user_list_roles': MagicMock(return_value=[]),
            'uyuni.channel_list_manageable_channels': MagicMock(return_value=[]),
            'uyuni.channel_list_my_channels': MagicMock(return_value=[]),
            'uyuni.channel_software_set_user_manageable': MagicMock(),
            'uyuni.channel_software_set_user_subscribable': MagicMock()}):
            result = uyuni_config.user_channels('username', 'password',
                                                manageable_channels=['manage1'],
                                                subscribable_channels=['subscribe1'],
                                                org_admin_user='org_admin_user',
                                                org_admin_password='org_admin_password')
            assert result is not None
            assert result['name'] == 'username'
            assert result['result']
            assert result['changes'] == {'manageable_channels': {'manage1': True},
                                         'subscribable_channels': {'subscribe1': True}}

            uyuni_config.__salt__['uyuni.user_list_roles'].assert_called_once_with('username', password='password')

            uyuni_config.__salt__['uyuni.channel_list_manageable_channels'].assert_called_once_with('username',
                                                                                                   'password')

            uyuni_config.__salt__['uyuni.channel_list_my_channels'].assert_called_once_with('username',
                                                                                           'password')

            # Grants are performed with the org admin's credentials, not the user's.
            uyuni_config.__salt__['uyuni.channel_software_set_user_manageable'].assert_called_once_with('manage1',
                                                                                                       'username',
                                                                                                        True,
                                                                                                       'org_admin_user',
                                                                                                       'org_admin_password')

            uyuni_config.__salt__['uyuni.channel_software_set_user_subscribable'].assert_called_once_with('subscribe1',
                                                                                                         'username',
                                                                                                          True,
                                                                                                         'org_admin_user',
                                                                                                         'org_admin_password')

    def test_user_channels_no_changes(self):
        """Desired channel sets already match the current ones: no changes, no grant calls."""
        with patch.dict(uyuni_config.__salt__, {
            'uyuni.user_list_roles': MagicMock(return_value=[]),
            'uyuni.channel_list_manageable_channels': MagicMock(return_value=[{"label": "manage1"}]),
            'uyuni.channel_list_my_channels': MagicMock(return_value=[{"label": "subscribe1"}]),
            'uyuni.channel_software_set_user_manageable': MagicMock(),
            'uyuni.channel_software_set_user_subscribable': MagicMock()}):
            result = uyuni_config.user_channels('username', 'password',
                                                manageable_channels=['manage1'],
                                                subscribable_channels=['subscribe1'],
                                                org_admin_user='org_admin_user',
                                                org_admin_password='org_admin_password')
            assert result is not None
            assert result['name'] == 'username'
            assert result['result']
            assert result['changes'] == {}

            uyuni_config.__salt__['uyuni.user_list_roles'].assert_called_once_with('username', password='password')

            uyuni_config.__salt__['uyuni.channel_list_manageable_channels'].assert_called_once_with('username',
                                                                                                   'password')

            uyuni_config.__salt__['uyuni.channel_list_my_channels'].assert_called_once_with('username',
                                                                                           'password')

    def test_user_channels_managed_subscribe_change(self):
        """A channel demoted from manageable to subscribable: revoke manage, grant subscribe."""
        with patch.dict(uyuni_config.__salt__, {
            'uyuni.user_list_roles': MagicMock(return_value=[]),
            'uyuni.channel_list_manageable_channels': MagicMock(return_value=[{"label": "manage1"}]),
            'uyuni.channel_list_my_channels': MagicMock(return_value=[{"label": "manage1"}]),
            'uyuni.channel_software_set_user_manageable': MagicMock(),
            'uyuni.channel_software_set_user_subscribable': MagicMock()}):
            result = uyuni_config.user_channels('username', 'password',
                                                manageable_channels=[],
                                                subscribable_channels=['manage1'],
                                                org_admin_user='org_admin_user',
                                                org_admin_password='org_admin_password')
            # FIX: removed leftover debug print(result).
            assert result is not None
            assert result['name'] == 'username'
            assert result['result']
            assert result['changes'] == {'manageable_channels': {'manage1': False},
                                         'subscribable_channels': {'manage1': True}}

            uyuni_config.__salt__['uyuni.user_list_roles'].assert_called_once_with('username', password='password')

            uyuni_config.__salt__['uyuni.channel_list_manageable_channels'].assert_called_once_with('username',
                                                                                                   'password')

            uyuni_config.__salt__['uyuni.channel_list_my_channels'].assert_called_once_with('username',
                                                                                           'password')

            uyuni_config.__salt__['uyuni.channel_software_set_user_manageable'].assert_called_once_with('manage1',
                                                                                                       'username',
                                                                                                        False,
                                                                                                       'org_admin_user',
                                                                                                       'org_admin_password')

            uyuni_config.__salt__['uyuni.channel_software_set_user_subscribable'].assert_called_once_with('manage1',
                                                                                                         'username',
                                                                                                          True,
                                                                                                         'org_admin_user',
                                                                                                         'org_admin_password')


class TestManageGroups:
    """Behavioural tests for the uyuni_config group_present / group_absent states."""

    # Org-admin credentials forwarded (as kwargs) to the uyuni.* execution modules.
    _CREDS = {'org_admin_user': 'org_admin_user',
              'org_admin_password': 'org_admin_password'}

    @staticmethod
    def _not_found_error():
        """Build the XML-RPC-style 'group not found' exception (faultCode 2201)."""
        err = Exception("Group not found")
        err.faultCode = 2201
        return err

    def test_group_present_new_group_test_no_systems(self):
        """test=True, group missing, no systems resolved: only name/description in changes."""
        salt_mocks = {
            'uyuni.systemgroup_get_details': MagicMock(side_effect=self._not_found_error()),
            'uyuni.master_select_minions': MagicMock(),
            'uyuni.systems_get_minion_id_map': MagicMock(),
        }
        with patch.dict(uyuni_config.__salt__, salt_mocks):
            with patch.dict(uyuni_config.__opts__, {'test': True}):
                result = uyuni_config.group_present('my_group', 'my group description',
                                                    target='*http*', **self._CREDS)
                assert result is not None
                assert result['name'] == 'my_group'
                assert result['result'] is None
                assert result['comment'] == 'my_group would be updated'
                assert result['changes'] == {'description': {'new': 'my group description'},
                                             'name': {'new': 'my_group'}}

                salt_mocks['uyuni.systemgroup_get_details'].assert_called_once_with('my_group', **self._CREDS)
                salt_mocks['uyuni.master_select_minions'].assert_called_once_with('*http*', 'glob')
                salt_mocks['uyuni.systems_get_minion_id_map'].assert_called_once_with('org_admin_user',
                                                                                      'org_admin_password')

    def test_group_present_new_group_test(self):
        """test=True, group missing, one registered minion matches: creation is previewed."""
        salt_mocks = {
            'uyuni.systemgroup_get_details': MagicMock(side_effect=self._not_found_error()),
            'uyuni.master_select_minions': MagicMock(return_value={'minions': ['my_minion_1', 'my_minion_2']}),
            'uyuni.systems_get_minion_id_map': MagicMock(return_value={'my_minion_1': '10001'}),
        }
        with patch.dict(uyuni_config.__salt__, salt_mocks):
            with patch.dict(uyuni_config.__opts__, {'test': True}):
                result = uyuni_config.group_present('my_group', 'my group description',
                                                    target='*http*', **self._CREDS)
                assert result is not None
                assert result['name'] == 'my_group'
                assert result['result'] is None
                assert result['comment'] == 'my_group would be updated'
                # Only my_minion_1 is registered, so only its system id appears.
                assert result['changes'] == {'description': {'new': 'my group description'},
                                             'systems': {'new': ['10001']},
                                             'name': {'new': 'my_group'}}

                salt_mocks['uyuni.systemgroup_get_details'].assert_called_once_with('my_group', **self._CREDS)
                salt_mocks['uyuni.master_select_minions'].assert_called_once_with('*http*', 'glob')
                salt_mocks['uyuni.systems_get_minion_id_map'].assert_called_once_with('org_admin_user',
                                                                                      'org_admin_password')

    def test_group_present_new_group(self):
        """Group missing: it is created and the matching registered system added."""
        salt_mocks = {
            'uyuni.systemgroup_get_details': MagicMock(side_effect=self._not_found_error()),
            'uyuni.master_select_minions': MagicMock(return_value={'minions': ['my_minion_1', 'my_minion_2']}),
            'uyuni.systems_get_minion_id_map': MagicMock(return_value={'my_minion_1': '10001'}),
            'uyuni.systemgroup_create': MagicMock(),
            'uyuni.systemgroup_add_remove_systems': MagicMock(),
        }
        with patch.dict(uyuni_config.__salt__, salt_mocks):
            result = uyuni_config.group_present('my_group', 'my group description',
                                                target='*http*', **self._CREDS)
            assert result is not None
            assert result['name'] == 'my_group'
            assert result['result'] is True
            assert result['comment'] == 'my_group successfully updated'
            assert result['changes'] == {'description': {'new': 'my group description'},
                                         'systems': {'new': ['10001']},
                                         'name': {'new': 'my_group'}}

            salt_mocks['uyuni.systemgroup_get_details'].assert_called_once_with('my_group', **self._CREDS)
            salt_mocks['uyuni.master_select_minions'].assert_called_once_with('*http*', 'glob')
            salt_mocks['uyuni.systems_get_minion_id_map'].assert_called_once_with('org_admin_user',
                                                                                 'org_admin_password')
            salt_mocks['uyuni.systemgroup_create'].assert_called_once_with('my_group', 'my group description',
                                                                           **self._CREDS)
            salt_mocks['uyuni.systemgroup_add_remove_systems'].assert_called_once_with('my_group', True,
                                                                                       ['10001'], **self._CREDS)

    def test_group_present_update_group(self):
        """Existing group: description updated and systems reconciled (remove then add)."""
        salt_mocks = {
            'uyuni.systemgroup_get_details': MagicMock(
                return_value={'description': 'old description', 'name': 'my_group'}),
            'uyuni.systemgroup_list_systems': MagicMock(return_value=[{'id': '10001'}, {'id': '10003'}]),
            'uyuni.master_select_minions': MagicMock(
                return_value={'minions': ['my_minion_1', 'my_minion_2', 'my_minion_4']}),
            'uyuni.systems_get_minion_id_map': MagicMock(return_value={'my_minion_1': '10001', 'my_minion_2': '10002'}),
            'uyuni.systemgroup_update': MagicMock(),
            'uyuni.systemgroup_add_remove_systems': MagicMock(),
        }
        with patch.dict(uyuni_config.__salt__, salt_mocks):
            result = uyuni_config.group_present('my_group', 'my group description',
                                                target='*http*', **self._CREDS)
            assert result is not None
            assert result['name'] == 'my_group'
            assert result['result']
            assert result['comment'] == 'my_group successfully updated'
            assert result['changes'] == {'description': {'new': 'my group description',
                                                         'old': 'old description'},
                                         'systems': {'new': ['10001', '10002'],
                                                     'old': ['10001', '10003']}}

            salt_mocks['uyuni.systemgroup_get_details'].assert_called_once_with('my_group', **self._CREDS)
            salt_mocks['uyuni.systemgroup_list_systems'].assert_called_once_with('my_group', **self._CREDS)
            salt_mocks['uyuni.master_select_minions'].assert_called_once_with('*http*', 'glob')
            salt_mocks['uyuni.systems_get_minion_id_map'].assert_called_once_with('org_admin_user',
                                                                                 'org_admin_password')
            salt_mocks['uyuni.systemgroup_update'].assert_called_once_with('my_group', 'my group description',
                                                                           **self._CREDS)
            # Reconciliation: drop system 10003 first, then add 10002.
            salt_mocks['uyuni.systemgroup_add_remove_systems'].assert_has_calls(
                [call('my_group', False, ['10003'], **self._CREDS),
                 call('my_group', True, ['10002'], **self._CREDS)])

    def test_group_absent_success_test(self):
        """test=True on an existing group: removal is announced but not performed."""
        details_mock = MagicMock(return_value={'description': 'description', 'name': 'my_group'})
        with patch.dict(uyuni_config.__salt__, {'uyuni.systemgroup_get_details': details_mock}):
            with patch.dict(uyuni_config.__opts__, {'test': True}):
                result = uyuni_config.group_absent('my_group', **self._CREDS)
                assert result is not None
                assert result['name'] == 'my_group'
                assert result['result'] is None
                assert result['comment'] == 'my_group would be removed'
                assert result['changes'] == {}
                details_mock.assert_called_once_with('my_group', **self._CREDS)

    def test_group_absent_success(self):
        """Existing group is deleted; its old attributes are reported as changes."""
        salt_mocks = {
            'uyuni.systemgroup_get_details': MagicMock(return_value={'description': 'description',
                                                                     'name': 'my_group'}),
            'uyuni.systemgroup_delete': MagicMock(return_value=True),
        }
        with patch.dict(uyuni_config.__salt__, salt_mocks):
            result = uyuni_config.group_absent('my_group', **self._CREDS)
            assert result is not None
            assert result['name'] == 'my_group'
            assert result['result']
            assert result['comment'] == 'Group my_group has been deleted'
            assert result['changes'] == {'description': {'old': 'description'},
                                         'name': {'old': 'my_group'}}
            salt_mocks['uyuni.systemgroup_get_details'].assert_called_once_with('my_group', **self._CREDS)
            salt_mocks['uyuni.systemgroup_delete'].assert_called_once_with('my_group', **self._CREDS)

    def test_group_absent_already_removed(self):
        """Missing group (faultCode 2201): state succeeds with no changes."""
        details_mock = MagicMock(side_effect=self._not_found_error())
        with patch.dict(uyuni_config.__salt__, {'uyuni.systemgroup_get_details': details_mock}):
            result = uyuni_config.group_absent('my_group', **self._CREDS)
            assert result is not None
            assert result['name'] == 'my_group'
            assert result['result']
            assert result['comment'] == 'my_group is already absent'
            assert result['changes'] == {}
            details_mock.assert_called_once_with('my_group', **self._CREDS)


class TestManageOrgs:
    """Unit tests for the uyuni_config organization states.

    Covers org_present (create / update / no-op) and org_absent
    (delete / already absent), each in normal and in test (dry-run) mode.
    """

    def test_org_present_new_org_test(self):
        """Dry-run: a missing org is reported as a pending change, nothing is created."""
        exc = Exception("org not found")
        # 2850 is the XMLRPC fault code the state treats as "organization does not exist".
        exc.faultCode = 2850

        with patch.dict(uyuni_config.__salt__, {'uyuni.org_get_details': MagicMock(side_effect=exc)}):
            with patch.dict(uyuni_config.__opts__, {'test': True}):
                result = uyuni_config.org_present('my_org', 'org_admin_user', 'org_admin_password',
                                                 'First Name', 'Last Name', 'email@email.com',
                                                  admin_user='admin_user',
                                                  admin_password='admin_password')

                assert result is not None
                assert result['name'] == 'my_org'
                # result is None in test mode: nothing was actually changed.
                assert result['result'] is None
                assert result['comment'] == 'my_org would be updated'
                assert result['changes'] == {'email': {'new': 'email@email.com'},
                                             'first_name': {'new': 'First Name'},
                                             'last_name': {'new': 'Last Name'},
                                             'org_admin_user': {'new': 'org_admin_user'},
                                             'org_name': {'new': 'my_org'},
                                             'pam': {'new': False}}
                uyuni_config.__salt__['uyuni.org_get_details'].assert_called_once_with('my_org',
                                                                                       admin_user='admin_user',
                                                                                       admin_password='admin_password')

    def test_org_present_new_org(self):
        """A missing org (fault 2850) triggers org_create with all supplied details."""
        exc = Exception("org not found")
        exc.faultCode = 2850

        with patch.dict(uyuni_config.__salt__, {
            'uyuni.org_get_details': MagicMock(side_effect=exc),
            'uyuni.org_create': MagicMock()}):
            result = uyuni_config.org_present('my_org', 'org_admin_user', 'org_admin_password',
                                             'First Name', 'Last Name', 'email@email.com',
                                              admin_user='admin_user',
                                              admin_password='admin_password')

            assert result is not None
            assert result['name'] == 'my_org'
            assert result['result']
            assert result['comment'] == 'my_org org successfully modified'
            assert result['changes'] == {'email': {'new': 'email@email.com'},
                                         'first_name': {'new': 'First Name'},
                                         'last_name': {'new': 'Last Name'},
                                         'org_admin_user': {'new': 'org_admin_user'},
                                         'org_name': {'new': 'my_org'},
                                         'pam': {'new': False}}
            uyuni_config.__salt__['uyuni.org_get_details'].assert_called_once_with('my_org',
                                                                                   admin_user='admin_user',
                                                                                   admin_password='admin_password')
            # pam defaults to False when not passed to org_present.
            uyuni_config.__salt__['uyuni.org_create'].assert_called_once_with(name='my_org',
                                                                              org_admin_user="org_admin_user",
                                                                              org_admin_password="org_admin_password",
                                                                              first_name="First Name",
                                                                              last_name="Last Name",
                                                                              email="email@email.com",
                                                                              admin_user='admin_user',
                                                                              admin_password='admin_password',
                                                                              pam=False)

    def test_org_present_update_org(self):
        """An existing org with stale admin-user details gets user_set_details called."""
        # NOTE(review): key 'uui' looks like a typo (uid/login?); the state only
        # compares email/first_name/last_name here — confirm against the module.
        current_user = {'uui': 'org_admin_user',
                        'email': 'old_mail@mail.com',
                        'first_name': 'first',
                        'last_name': 'last'}
        with patch.dict(uyuni_config.__salt__, {
            'uyuni.org_get_details': MagicMock(return_value={'id': 100, 'name': 'my_org'}),
            'uyuni.user_get_details': MagicMock(return_value=current_user),
            'uyuni.user_set_details': MagicMock()}):
            result = uyuni_config.org_present('my_org', 'org_admin_user', 'org_admin_password',
                                             'First Name', 'Last Name', 'email@email.com',
                                              admin_user='admin_user',
                                              admin_password='admin_password')

            assert result is not None
            assert result['name'] == 'my_org'
            assert result['result']
            assert result['comment'] == 'my_org org successfully modified'
            # Only the differing fields show up in changes, with old/new pairs.
            assert result['changes'] == {'email': {'new': 'email@email.com',
                                                   'old': 'old_mail@mail.com'},
                                         'first_name': {'new': 'First Name',
                                                        'old': 'first'},
                                         'last_name': {'new': 'Last Name',
                                                       'old': 'last'}}

            uyuni_config.__salt__['uyuni.org_get_details'].assert_called_once_with('my_org',
                                                                                   admin_user='admin_user',
                                                                                   admin_password='admin_password')
            uyuni_config.__salt__['uyuni.user_get_details'].assert_called_once_with('org_admin_user',
                                                                                    org_admin_user='org_admin_user',
                                                                                    org_admin_password='org_admin_password')

            uyuni_config.__salt__['uyuni.user_set_details'].assert_called_once_with(login='org_admin_user',
                                                                                    password='org_admin_password',
                                                                                    email='email@email.com',
                                                                                    first_name='First Name',
                                                                                    last_name='Last Name',
                                                                                    org_admin_user='org_admin_user',
                                                                                    org_admin_password='org_admin_password')

    def test_org_present_no_changes(self):
        """If the admin user's details already match, the state reports no changes."""
        current_user = {'uui': 'org_admin_user',
                        'email': 'email@email.com',
                        'first_name': 'First Name',
                        'last_name': 'Last Name'}
        with patch.dict(uyuni_config.__salt__, {
            'uyuni.org_get_details': MagicMock(return_value={'id': 100, 'name': 'my_org'}),
            'uyuni.user_get_details': MagicMock(return_value=current_user),
            'uyuni.user_set_details': MagicMock()}):
            result = uyuni_config.org_present('my_org', 'org_admin_user', 'org_admin_password',
                                             'First Name', 'Last Name', 'email@email.com',
                                              admin_user='admin_user',
                                              admin_password='admin_password')

            assert result is not None
            assert result['name'] == 'my_org'
            assert result['result']
            assert result['comment'] == 'my_org is already in the desired state'
            assert result['changes'] == {}

            uyuni_config.__salt__['uyuni.org_get_details'].assert_called_once_with('my_org',
                                                                                   admin_user='admin_user',
                                                                                   admin_password='admin_password')
            uyuni_config.__salt__['uyuni.user_get_details'].assert_called_once_with('org_admin_user',
                                                                                    org_admin_user='org_admin_user',
                                                                                    org_admin_password='org_admin_password')

    def test_org_absent_success_test(self):
        """Dry-run: an existing org is reported as 'would be removed' with no changes."""
        with patch.dict(uyuni_config.__salt__,
                        {'uyuni.org_get_details': MagicMock(return_value={'id': 100, 'name': 'my_org'})}):
            with patch.dict(uyuni_config.__opts__, {'test': True}):
                result = uyuni_config.org_absent('my_org',
                                                 admin_user='admin_user',
                                                 admin_password='admin_password')

                assert result is not None
                assert result['name'] == 'my_org'
                assert result['result'] is None
                assert result['comment'] == 'my_org would be removed'
                assert result['changes'] == {}
                uyuni_config.__salt__['uyuni.org_get_details'].assert_called_once_with('my_org',
                                                                                       admin_user='admin_user',
                                                                                       admin_password='admin_password')

    def test_org_absent_success(self):
        """An existing org is deleted via org_delete and the old name is recorded."""
        with patch.dict(uyuni_config.__salt__, {
            'uyuni.org_get_details': MagicMock(return_value={'id': 100, 'name': 'my_org'}),
            'uyuni.org_delete': MagicMock()}):
            result = uyuni_config.org_absent('my_org',
                                             admin_user='admin_user',
                                             admin_password='admin_password')

            assert result is not None
            assert result['name'] == 'my_org'
            assert result['result']
            assert result['comment'] == 'Org my_org has been deleted'
            assert result['changes'] == {'name': {'old': 'my_org'}}
            uyuni_config.__salt__['uyuni.org_get_details'].assert_called_once_with('my_org',
                                                                                   admin_user='admin_user',
                                                                                   admin_password='admin_password')
            uyuni_config.__salt__['uyuni.org_delete'].assert_called_once_with('my_org',
                                                                              admin_user='admin_user',
                                                                              admin_password='admin_password')

    def test_org_absent_not_present(self):
        """org_absent is a successful no-op when the org lookup raises fault 2850."""
        exc = Exception("org not found")
        exc.faultCode = 2850

        with patch.dict(uyuni_config.__salt__, {'uyuni.org_get_details': MagicMock(side_effect=exc)}):
            result = uyuni_config.org_absent('my_org',
                                             admin_user='admin_user',
                                             admin_password='admin_password')

            assert result is not None
            assert result['name'] == 'my_org'
            assert result['result']
            assert result['comment'] == 'my_org is already absent'
            assert result['changes'] == {}
            uyuni_config.__salt__['uyuni.org_get_details'].assert_called_once_with('my_org',
                                                                                   admin_user='admin_user',
                                                                                   admin_password='admin_password')


class TestManageOrgsTrust:
    """Unit tests for the uyuni_config.org_trust state."""

    def test_org_trust_test(self):
        # Dry-run: only the org whose trust flag differs (new_org_2) appears
        # in the planned changes; nothing is modified.
        trust_listing = [{'orgId': 2, 'orgName': 'new_org_1', 'trustEnabled': True},
                         {'orgId': 3, 'orgName': 'new_org_2', 'trustEnabled': False}]
        mocks = {
            'uyuni.org_trust_list_trusts': MagicMock(return_value=trust_listing),
            'uyuni.org_get_details': MagicMock(return_value={'id': 1, 'name': 'my_org'}),
        }
        with patch.dict(uyuni_config.__salt__, mocks), \
                patch.dict(uyuni_config.__opts__, {'test': True}):
            ret = uyuni_config.org_trust('state_name', 'my_org', ['new_org_1', 'new_org_2'],
                                         admin_user='admin_user',
                                         admin_password='admin_password')

            assert ret is not None
            assert ret['name'] == 'state_name'
            assert ret['result'] is None
            assert ret['comment'] == 'my_org would be created'
            assert ret['changes'] == {'new_org_2': {'new': True, 'old': None}}

            uyuni_config.__salt__['uyuni.org_trust_list_trusts'].assert_called_once_with(
                'my_org', admin_user='admin_user', admin_password='admin_password')
            uyuni_config.__salt__['uyuni.org_get_details'].assert_called_once_with(
                'my_org', admin_user='admin_user', admin_password='admin_password')

    def test_org_trust_update(self):
        # new_org_2 must gain a trust (add) and new_org_3 must lose its
        # trust (remove); new_org_1 already matches and stays untouched.
        trust_listing = [{'orgId': 2, 'orgName': 'new_org_1', 'trustEnabled': True},
                         {'orgId': 3, 'orgName': 'new_org_2', 'trustEnabled': False},
                         {'orgId': 4, 'orgName': 'new_org_3', 'trustEnabled': True}]
        mocks = {
            'uyuni.org_trust_list_trusts': MagicMock(return_value=trust_listing),
            'uyuni.org_get_details': MagicMock(return_value={'id': 1, 'name': 'my_org'}),
            'uyuni.org_trust_add_trust': MagicMock(return_value=True),
            'uyuni.org_trust_remove_trust': MagicMock(return_value=True),
        }
        with patch.dict(uyuni_config.__salt__, mocks):
            ret = uyuni_config.org_trust('state_name', 'my_org', ['new_org_1', 'new_org_2'],
                                         admin_user='admin_user',
                                         admin_password='admin_password')

            assert ret is not None
            assert ret['name'] == 'state_name'
            assert ret['result']
            assert ret['comment'] == "Org 'my_org' trusts successfully modified"
            assert ret['changes'] == {'new_org_2': {'new': True, 'old': None},
                                      'new_org_3': {'new': None, 'old': True}}

            uyuni_config.__salt__['uyuni.org_trust_list_trusts'].assert_called_once_with(
                'my_org', admin_user='admin_user', admin_password='admin_password')
            uyuni_config.__salt__['uyuni.org_get_details'].assert_called_once_with(
                'my_org', admin_user='admin_user', admin_password='admin_password')
            uyuni_config.__salt__['uyuni.org_trust_add_trust'].assert_called_once_with(
                1, 3, admin_user='admin_user', admin_password='admin_password')
            uyuni_config.__salt__['uyuni.org_trust_remove_trust'].assert_called_once_with(
                1, 4, admin_user='admin_user', admin_password='admin_password')

    def test_org_trust_no_changes(self):
        # Current trusts already match the requested list: no changes reported.
        trust_listing = [{'orgId': 2, 'orgName': 'new_org_1', 'trustEnabled': True},
                         {'orgId': 3, 'orgName': 'new_org_2', 'trustEnabled': True},
                         {'orgId': 4, 'orgName': 'new_org_3', 'trustEnabled': False}]
        mocks = {
            'uyuni.org_trust_list_trusts': MagicMock(return_value=trust_listing),
            'uyuni.org_get_details': MagicMock(return_value={'id': 1, 'name': 'my_org'}),
        }
        with patch.dict(uyuni_config.__salt__, mocks):
            ret = uyuni_config.org_trust('state_name', 'my_org', ['new_org_1', 'new_org_2'],
                                         admin_user='admin_user',
                                         admin_password='admin_password')

            assert ret is not None
            assert ret['name'] == 'state_name'
            assert ret['result']
            assert ret['comment'] == 'my_org is already in the desired state'
            assert ret['changes'] == {}

            uyuni_config.__salt__['uyuni.org_trust_list_trusts'].assert_called_once_with(
                'my_org', admin_user='admin_user', admin_password='admin_password')
            uyuni_config.__salt__['uyuni.org_get_details'].assert_called_once_with(
                'my_org', admin_user='admin_user', admin_password='admin_password')


class TestUyuniActivationKeys:

    # Smallest valid argument set for activation_key_present: key name,
    # description and the org admin credentials only.
    MINIMAL_AK_PRESENT = {
        'name': 'ak',
        'description': 'ak description',
        'org_admin_user': 'admin',
        'org_admin_password': 'admin'
    }

    # MINIMAL_AK_PRESENT extended with every optional setting the tests exercise
    # (channels, limits, system types, packages, groups, config deployment).
    FULL_AK_PRESENT = {
        **MINIMAL_AK_PRESENT,
        'base_channel': 'sles15SP2',
        'usage_limit': 10,
        'contact_method': 'ssh-push',
        'system_types': ['virtualization_host'],
        'universal_default': True,
        'child_channels': ['sles15SP2-tools'],
        'configuration_channels': ['my-channel'],
        'packages': [{'name': 'vim'}, {'name': 'emacs', 'arch': 'x86_64'}],
        'server_groups': ['my-group'],
        'configure_after_registration': True
    }

    # Stub return of uyuni.user_get_details; presumably org_id 1 is what
    # prefixes the expected '1-ak' key name — confirm in the state module.
    ORG_USER_DETAILS = {
        'org_id': 1
    }

    # Stub return of uyuni.systemgroup_list_all_groups, mapping names to ids
    # ('my-group' -> 1 is asserted in the add_server_groups calls).
    ALL_GROUPS = [
        {'name': 'my-group', 'id': 1},
        {'name': 'old_group', 'id': 2}
    ]

    def test_ak_present_create_minimal_data(self):
        # Fault -212 from activation_key_get_details means the key does not
        # exist yet, so the state must create it and then set its details.
        missing = Exception("ak not found")
        missing.faultCode = -212

        mocks = {
            'uyuni.systemgroup_list_all_groups': MagicMock(return_value=self.ALL_GROUPS),
            'uyuni.user_get_details': MagicMock(return_value=self.ORG_USER_DETAILS),
            'uyuni.activation_key_get_details': MagicMock(side_effect=missing),
            'uyuni.activation_key_create': MagicMock(),
            'uyuni.activation_key_set_details': MagicMock(),
        }
        with patch.dict(uyuni_config.__salt__, mocks):
            ret = uyuni_config.activation_key_present(**self.MINIMAL_AK_PRESENT)

            assert ret is not None
            assert ret['name'] == '1-ak'
            assert ret['result']
            assert ret['comment'] == '1-ak activation key successfully modified'
            # Unspecified options are reported with their defaults.
            assert ret['changes'] == {
                'description': {'new': 'ak description'},
                'base_channel': {'new': ''},
                'usage_limit': {'new': 0},
                'universal_default': {'new': False},
                'contact_method': {'new': 'default'},
                'configure_after_registration': {'new': False},
                'key': {'new': '1-ak'}
            }

            uyuni_config.__salt__['uyuni.systemgroup_list_all_groups'].assert_called_once_with('admin', 'admin')
            uyuni_config.__salt__['uyuni.user_get_details'].assert_called_once_with('admin', 'admin')
            uyuni_config.__salt__['uyuni.activation_key_get_details'].assert_called_once_with(
                '1-ak', org_admin_user='admin', org_admin_password='admin')

            expected_create = {'description': self.MINIMAL_AK_PRESENT['description'],
                               'key': self.MINIMAL_AK_PRESENT['name'],
                               'base_channel_label': '',
                               'usage_limit': 0,
                               'system_types': [],
                               'universal_default': False,
                               'org_admin_user': self.MINIMAL_AK_PRESENT['org_admin_user'],
                               'org_admin_password': self.MINIMAL_AK_PRESENT['org_admin_password']}
            uyuni_config.__salt__['uyuni.activation_key_create'].assert_called_once_with(**expected_create)
            uyuni_config.__salt__['uyuni.activation_key_set_details'].assert_called_once_with(
                '1-ak',
                contact_method='default',
                usage_limit=0,
                org_admin_user='admin',
                org_admin_password='admin')

    def test_ak_present_create_full_data(self):
        """Creating a fully-specified key triggers every follow-up setter call.

        After activation_key_create, the state must add child channels,
        server groups (by id) and packages, enable config deployment, and
        set the configuration channels.
        """
        exc = Exception("ak not found")
        # -212 is the fault code treated as "activation key does not exist".
        exc.faultCode = -212

        with patch.dict(uyuni_config.__salt__, {
            'uyuni.systemgroup_list_all_groups': MagicMock(return_value=self.ALL_GROUPS),
            'uyuni.user_get_details': MagicMock(return_value=self.ORG_USER_DETAILS),
            'uyuni.activation_key_get_details': MagicMock(side_effect=exc),
            'uyuni.activation_key_create': MagicMock(),
            'uyuni.activation_key_set_details': MagicMock(),
            'uyuni.activation_key_add_child_channels': MagicMock(),
            'uyuni.activation_key_add_server_groups': MagicMock(),
            'uyuni.activation_key_add_packages': MagicMock(),
            'uyuni.activation_key_enable_config_deployment': MagicMock(),
            'uyuni.activation_key_set_config_channels': MagicMock(),
        }):

            result = uyuni_config.activation_key_present(**self.FULL_AK_PRESENT)
            assert result is not None
            assert result['name'] == '1-ak'
            assert result['result']
            assert result['comment'] == '1-ak activation key successfully modified'
            assert result['changes'] == {
                'description': {'new': 'ak description'},
                'base_channel': {'new': 'sles15SP2'},
                'usage_limit': {'new': 10},
                'universal_default': {'new': True},
                'contact_method': {'new': 'ssh-push'},
                'system_types': {'new': ['virtualization_host']},
                'child_channels': {'new': ['sles15SP2-tools']},
                'server_groups': {'new': ['my-group']},
                'packages': {'new': [{'name': 'vim'}, {'name': 'emacs', 'arch': 'x86_64'}]},
                'configure_after_registration': {'new': True},
                'configuration_channels': {'new': ['my-channel']},
                'key': {'new': '1-ak'}
            }
            uyuni_config.__salt__['uyuni.systemgroup_list_all_groups'].assert_called_once_with('admin', 'admin')
            uyuni_config.__salt__['uyuni.user_get_details'].assert_called_once_with('admin', 'admin')
            uyuni_config.__salt__['uyuni.activation_key_get_details'].assert_called_once_with('1-ak',
                                                                                              org_admin_user='admin',
                                                                                              org_admin_password='admin')

            uyuni_config.__salt__['uyuni.activation_key_create'].assert_called_once_with(
                description=self.FULL_AK_PRESENT['description'],
                key=self.FULL_AK_PRESENT['name'],
                base_channel_label=self.FULL_AK_PRESENT['base_channel'],
                usage_limit=self.FULL_AK_PRESENT['usage_limit'],
                system_types=self.FULL_AK_PRESENT['system_types'],
                universal_default=self.FULL_AK_PRESENT['universal_default'],
                org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                org_admin_password=self.FULL_AK_PRESENT['org_admin_password']
            )
            uyuni_config.__salt__['uyuni.activation_key_set_details'].assert_called_once_with('1-ak',
                                                                                              contact_method=self.FULL_AK_PRESENT['contact_method'],
                                                                                              usage_limit=self.FULL_AK_PRESENT['usage_limit'],
                                                                                              org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                              org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])

            uyuni_config.__salt__['uyuni.activation_key_add_child_channels'].assert_called_once_with('1-ak',
                                                                                                      self.FULL_AK_PRESENT['child_channels'],
                                                                                                      org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                      org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])

            # Server groups are resolved to ids via ALL_GROUPS ('my-group' -> 1).
            uyuni_config.__salt__['uyuni.activation_key_add_server_groups'].assert_called_once_with('1-ak',
                                                                                                    [1],
                                                                                                    org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                    org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])

            uyuni_config.__salt__['uyuni.activation_key_add_packages'].assert_called_once_with('1-ak',
                                                                                                self.FULL_AK_PRESENT['packages'],
                                                                                                org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])

            uyuni_config.__salt__['uyuni.activation_key_enable_config_deployment'].assert_called_once_with('1-ak',
                                                                                                            org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                            org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])

            # Note: set_config_channels takes a LIST of keys as first argument.
            uyuni_config.__salt__['uyuni.activation_key_set_config_channels'].assert_called_once_with(['1-ak'],
                                                                                                      config_channel_label=self.FULL_AK_PRESENT['configuration_channels'],
                                                                                                      org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                      org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])

    def test_ak_present_create_full_data_test(self):
        # Dry-run variant: with opts['test'] set, the state only reports what
        # would change for a fully specified, not-yet-existing activation key.
        missing = Exception("ak not found")
        missing.faultCode = -212

        mocks = {
            'uyuni.systemgroup_list_all_groups': MagicMock(return_value=self.ALL_GROUPS),
            'uyuni.user_get_details': MagicMock(return_value=self.ORG_USER_DETAILS),
            'uyuni.activation_key_get_details': MagicMock(side_effect=missing),
        }
        with patch.dict(uyuni_config.__salt__, mocks), \
                patch.dict(uyuni_config.__opts__, {'test': True}):
            ret = uyuni_config.activation_key_present(**self.FULL_AK_PRESENT)

            assert ret is not None
            assert ret['name'] == '1-ak'
            assert ret['result'] is None
            assert ret['comment'] == '1-ak would be updated'
            assert ret['changes'] == {
                'description': {'new': 'ak description'},
                'base_channel': {'new': 'sles15SP2'},
                'usage_limit': {'new': 10},
                'universal_default': {'new': True},
                'contact_method': {'new': 'ssh-push'},
                'system_types': {'new': ['virtualization_host']},
                'child_channels': {'new': ['sles15SP2-tools']},
                'server_groups': {'new': ['my-group']},
                'packages': {'new': [{'name': 'vim'}, {'name': 'emacs', 'arch': 'x86_64'}]},
                'configure_after_registration': {'new': True},
                'configuration_channels': {'new': ['my-channel']},
                'key': {'new': '1-ak'}
            }

            uyuni_config.__salt__['uyuni.systemgroup_list_all_groups'].assert_called_once_with('admin', 'admin')
            uyuni_config.__salt__['uyuni.user_get_details'].assert_called_once_with('admin', 'admin')
            uyuni_config.__salt__['uyuni.activation_key_get_details'].assert_called_once_with(
                '1-ak', org_admin_user='admin', org_admin_password='admin')

    def test_ak_present_update_minimal_data(self):
        """An existing key whose description differs from the desired minimal
        state is updated through activation_key_set_details, and only the
        description appears in the reported changes."""
        existing_ak = {
            'description': 'old description',
            'base_channel_label': 'none',
            'usage_limit': 0,
            'universal_default': False,
            'contact_method': 'default',
            'entitlements': [],
            'child_channel_labels': [],
            'server_group_ids': [],
            'packages': [],
        }
        mocks = {
            'uyuni.systemgroup_list_all_groups': MagicMock(return_value=self.ALL_GROUPS),
            'uyuni.user_get_details': MagicMock(return_value=self.ORG_USER_DETAILS),
            'uyuni.activation_key_get_details': MagicMock(return_value=existing_ak),
            'uyuni.activation_key_check_config_deployment': MagicMock(return_value=False),
            'uyuni.activation_key_list_config_channels': MagicMock(return_value=[]),
            'uyuni.activation_key_set_details': MagicMock(),
        }
        with patch.dict(uyuni_config.__salt__, mocks):
            result = uyuni_config.activation_key_present(**self.MINIMAL_AK_PRESENT)

            assert result is not None
            assert result['name'] == '1-ak'
            assert result['result']
            assert result['comment'] == '1-ak activation key successfully modified'
            # Only the description differs; nothing else should be reported.
            assert result['changes'] == {
                'description': {'new': 'ak description', 'old': 'old description'}
            }

            # Read-only lookups performed to compute the diff.
            mocks['uyuni.systemgroup_list_all_groups'].assert_called_once_with('admin', 'admin')
            mocks['uyuni.user_get_details'].assert_called_once_with('admin', 'admin')
            mocks['uyuni.activation_key_get_details'].assert_called_once_with(
                '1-ak', org_admin_user='admin', org_admin_password='admin')
            mocks['uyuni.activation_key_check_config_deployment'].assert_called_once_with(
                '1-ak', 'admin', 'admin')
            mocks['uyuni.activation_key_list_config_channels'].assert_called_once_with(
                '1-ak', 'admin', 'admin')

            # The update carries the new description plus the unchanged defaults.
            mocks['uyuni.activation_key_set_details'].assert_called_once_with(
                '1-ak',
                description=self.MINIMAL_AK_PRESENT['description'],
                contact_method='default',
                base_channel_label='',
                usage_limit=0,
                universal_default=False,
                org_admin_user=self.MINIMAL_AK_PRESENT['org_admin_user'],
                org_admin_password=self.MINIMAL_AK_PRESENT['org_admin_password'])

    def test_ak_present_no_changes_minimal_data(self):
        """When the stored activation key already matches the minimal desired
        state, no update is attempted and the changes dict stays empty."""
        existing_ak = {
            'description': self.MINIMAL_AK_PRESENT['description'],
            'base_channel_label': 'none',
            'usage_limit': 0,
            'universal_default': False,
            'contact_method': 'default',
            'entitlements': [],
            'child_channel_labels': [],
            'server_group_ids': [],
            'packages': [],
        }
        mocks = {
            'uyuni.systemgroup_list_all_groups': MagicMock(return_value=self.ALL_GROUPS),
            'uyuni.user_get_details': MagicMock(return_value=self.ORG_USER_DETAILS),
            'uyuni.activation_key_get_details': MagicMock(return_value=existing_ak),
            'uyuni.activation_key_check_config_deployment': MagicMock(return_value=False),
            'uyuni.activation_key_list_config_channels': MagicMock(return_value=[]),
            'uyuni.activation_key_set_details': MagicMock(),
        }
        with patch.dict(uyuni_config.__salt__, mocks):
            result = uyuni_config.activation_key_present(**self.MINIMAL_AK_PRESENT)

            assert result is not None
            assert result['name'] == '1-ak'
            assert result['result']
            assert result['comment'] == '1-ak is already in the desired state'
            assert result['changes'] == {}

    def test_ak_present_update_full_data(self):
        """Every attribute of the stored key differs from the fully specified
        desired state: all change entries are reported and each add/remove/set
        salt call is issued exactly once with the expected arguments."""

        # Current server-side state: every field disagrees with FULL_AK_PRESENT.
        return_ak = {
            'description': 'old description',
            'base_channel_label': 'base_channel',
            'usage_limit': 0,
            'universal_default': False,
            'contact_method': 'default',
            'entitlements': ['container_build_host'],
            'child_channel_labels': ['child_channel'],
            'server_group_ids': [2],
            'packages': [{'name': 'pkg', 'arch': 'x86_63'}]
        }
        with patch.dict(uyuni_config.__salt__, {
            'uyuni.systemgroup_list_all_groups': MagicMock(return_value=self.ALL_GROUPS),
            'uyuni.user_get_details': MagicMock(return_value=self.ORG_USER_DETAILS),
            'uyuni.activation_key_get_details': MagicMock(return_value=return_ak),
            'uyuni.activation_key_check_config_deployment': MagicMock(return_value=False),
            'uyuni.activation_key_list_config_channels': MagicMock(return_value=[{'label': 'old_config'}]),
            'uyuni.activation_key_set_details': MagicMock(),
            'uyuni.activation_key_add_entitlements': MagicMock(),
            'uyuni.activation_key_remove_entitlements': MagicMock(),
            'uyuni.activation_key_add_child_channels': MagicMock(),
            'uyuni.activation_key_remove_child_channels': MagicMock(),
            'uyuni.activation_key_add_server_groups': MagicMock(),
            'uyuni.activation_key_remove_server_groups': MagicMock(),
            'uyuni.activation_key_add_packages': MagicMock(),
            'uyuni.activation_key_remove_packages': MagicMock(),
            'uyuni.activation_key_enable_config_deployment': MagicMock(),
            'uyuni.activation_key_set_config_channels': MagicMock(),
        }):

            result = uyuni_config.activation_key_present(**self.FULL_AK_PRESENT)
            assert result is not None
            assert result['name'] == '1-ak'
            assert result['result']
            assert result['comment'] == '1-ak activation key successfully modified'
            # Every attribute shows up in changes with its old and new value.
            assert result['changes'] == {'description': {'new': 'ak description', 'old': 'old description'},
                                         'base_channel': {'new': 'sles15SP2', 'old': 'base_channel'},
                                         'usage_limit': {'new': 10, 'old': 0},
                                         'universal_default': {'new': True, 'old': False},
                                         'contact_method': {'new': 'ssh-push', 'old': 'default'},
                                         'system_types': {'new': ['virtualization_host'],
                                                          'old': ['container_build_host']},
                                         'child_channels': {'new': ['sles15SP2-tools'], 'old': ['child_channel']},
                                         'server_groups': {'new': ['my-group'], 'old': ['old_group']},
                                         'packages': {'new': [{'name': 'vim'}, {'name': 'emacs', 'arch': 'x86_64'}],
                                                      'old': [{'name': 'pkg', 'arch': 'x86_63'}]},
                                         'configure_after_registration': {'new': True, 'old': False},
                                         'configuration_channels': {'new': ['my-channel'], 'old': ['old_config']}}

            # Read-only lookups done to compute the diff.
            uyuni_config.__salt__['uyuni.systemgroup_list_all_groups'].assert_called_once_with('admin', 'admin')
            uyuni_config.__salt__['uyuni.user_get_details'].assert_called_once_with('admin', 'admin')
            uyuni_config.__salt__['uyuni.activation_key_get_details'].assert_called_once_with('1-ak',
                                                                                              org_admin_user='admin',
                                                                                              org_admin_password='admin')
            uyuni_config.__salt__['uyuni.activation_key_check_config_deployment'].assert_called_once_with('1-ak','admin','admin')
            uyuni_config.__salt__['uyuni.activation_key_list_config_channels'].assert_called_once_with('1-ak','admin','admin')

            # Scalar attributes are pushed in a single set_details call.
            uyuni_config.__salt__['uyuni.activation_key_set_details'].assert_called_once_with('1-ak',
                                                                                              description=self.FULL_AK_PRESENT['description'],
                                                                                              contact_method=self.FULL_AK_PRESENT['contact_method'],
                                                                                              base_channel_label=self.FULL_AK_PRESENT['base_channel'],
                                                                                              usage_limit=self.FULL_AK_PRESENT['usage_limit'],
                                                                                              universal_default=self.FULL_AK_PRESENT['universal_default'],
                                                                                              org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                              org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])

            # Entitlements: desired ones added, obsolete ones removed.
            uyuni_config.__salt__['uyuni.activation_key_add_entitlements'].assert_called_once_with('1-ak',
                                                                                                   self.FULL_AK_PRESENT['system_types'],
                                                                                                   org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                   org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])
            uyuni_config.__salt__['uyuni.activation_key_remove_entitlements'].assert_called_once_with('1-ak',
                                                                                                      ['container_build_host'],
                                                                                                      org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                      org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])

            # Child channels: same add/remove pattern.
            uyuni_config.__salt__['uyuni.activation_key_add_child_channels'].assert_called_once_with('1-ak',
                                                                                                     self.FULL_AK_PRESENT['child_channels'],
                                                                                                     org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                     org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])
            uyuni_config.__salt__['uyuni.activation_key_remove_child_channels'].assert_called_once_with('1-ak',
                                                                                                        ['child_channel'],
                                                                                                        org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                        org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])

            # Server groups are handled by numeric id (group id 1 added, 2 removed).
            uyuni_config.__salt__['uyuni.activation_key_add_server_groups'].assert_called_once_with('1-ak',
                                                                                                    [1],
                                                                                                    org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                    org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])
            uyuni_config.__salt__['uyuni.activation_key_remove_server_groups'].assert_called_once_with('1-ak',
                                                                                                       [2],
                                                                                                       org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                       org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])

            # Packages: desired list added, the old server-side package removed.
            uyuni_config.__salt__['uyuni.activation_key_add_packages'].assert_called_once_with('1-ak',
                                                                                               self.FULL_AK_PRESENT['packages'],
                                                                                               org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                               org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])
            uyuni_config.__salt__['uyuni.activation_key_remove_packages'].assert_called_once_with('1-ak',
                                                                                                  [{'name': 'pkg', 'arch': 'x86_63'}],
                                                                                                  org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                  org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])

            # Config deployment was off and the desired state wants it on.
            uyuni_config.__salt__['uyuni.activation_key_enable_config_deployment'].assert_called_once_with('1-ak',
                                                                                                           org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                           org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])

            # Config channels are replaced wholesale; note the key list argument.
            uyuni_config.__salt__['uyuni.activation_key_set_config_channels'].assert_called_once_with(['1-ak'],
                                                                                                      config_channel_label=self.FULL_AK_PRESENT['configuration_channels'],
                                                                                                      org_admin_user=self.FULL_AK_PRESENT['org_admin_user'],
                                                                                                      org_admin_password=self.FULL_AK_PRESENT['org_admin_password'])

    def test_ak_present_no_changes_full_data(self):
        """A fully specified desired state that matches the stored key yields
        an empty changes dict and issues only read-only salt calls."""
        existing_ak = {
            'description': self.FULL_AK_PRESENT['description'],
            'base_channel_label': self.FULL_AK_PRESENT['base_channel'],
            'usage_limit': self.FULL_AK_PRESENT['usage_limit'],
            'universal_default': self.FULL_AK_PRESENT['universal_default'],
            'contact_method': self.FULL_AK_PRESENT['contact_method'],
            'entitlements': self.FULL_AK_PRESENT['system_types'],
            'child_channel_labels': self.FULL_AK_PRESENT['child_channels'],
            'server_group_ids': [1],
            'packages': self.FULL_AK_PRESENT['packages'],
        }
        mocks = {
            'uyuni.systemgroup_list_all_groups': MagicMock(return_value=self.ALL_GROUPS),
            'uyuni.user_get_details': MagicMock(return_value=self.ORG_USER_DETAILS),
            'uyuni.activation_key_get_details': MagicMock(return_value=existing_ak),
            'uyuni.activation_key_check_config_deployment': MagicMock(return_value=True),
            'uyuni.activation_key_list_config_channels': MagicMock(
                return_value=[{'label': 'my-channel'}]),
        }
        with patch.dict(uyuni_config.__salt__, mocks):
            result = uyuni_config.activation_key_present(**self.FULL_AK_PRESENT)

            assert result is not None
            assert result['name'] == '1-ak'
            assert result['result']
            assert result['comment'] == '1-ak is already in the desired state'
            assert result['changes'] == {}

            # Only the lookup calls needed to compute the (empty) diff.
            mocks['uyuni.systemgroup_list_all_groups'].assert_called_once_with('admin', 'admin')
            mocks['uyuni.user_get_details'].assert_called_once_with('admin', 'admin')
            mocks['uyuni.activation_key_get_details'].assert_called_once_with(
                '1-ak', org_admin_user='admin', org_admin_password='admin')
            mocks['uyuni.activation_key_check_config_deployment'].assert_called_once_with(
                '1-ak', 'admin', 'admin')
            mocks['uyuni.activation_key_list_config_channels'].assert_called_once_with(
                '1-ak', 'admin', 'admin')

    def test_ak_absent_not_present(self):
        """activation_key_absent is a no-op when the lookup raises an exception
        with faultCode -212 (key not found on the server)."""
        not_found = Exception("ak not found")
        not_found.faultCode = -212

        mocks = {
            'uyuni.user_get_details': MagicMock(return_value=self.ORG_USER_DETAILS),
            'uyuni.activation_key_get_details': MagicMock(side_effect=not_found),
        }
        with patch.dict(uyuni_config.__salt__, mocks):
            result = uyuni_config.activation_key_absent(
                'ak',
                org_admin_user='org_admin_user',
                org_admin_password='org_admin_password')

            assert result is not None
            assert result['name'] == 'ak'
            assert result['result']
            assert result['comment'] == '1-ak is already absent'
            assert result['changes'] == {}
            mocks['uyuni.user_get_details'].assert_called_once_with(
                'org_admin_user', 'org_admin_password')
            # The state prepends the org id, so the lookup uses '1-ak'.
            mocks['uyuni.activation_key_get_details'].assert_called_once_with(
                '1-ak',
                org_admin_user='org_admin_user',
                org_admin_password='org_admin_password')

    def test_ak_absent_present(self):
        """An existing activation key is removed via activation_key_delete and
        the deleted id is reported in the returned changes."""
        mocks = {
            'uyuni.user_get_details': MagicMock(return_value=self.ORG_USER_DETAILS),
            'uyuni.activation_key_get_details': MagicMock(return_value={}),
            'uyuni.activation_key_delete': MagicMock(),
        }
        with patch.dict(uyuni_config.__salt__, mocks):
            result = uyuni_config.activation_key_absent(
                'ak',
                org_admin_user='org_admin_user',
                org_admin_password='org_admin_password')

            assert result is not None
            assert result['name'] == 'ak'
            assert result['result']
            assert result['comment'] == 'Activation Key 1-ak has been deleted'
            assert result['changes'] == {'id': {'old': '1-ak'}}
            mocks['uyuni.user_get_details'].assert_called_once_with(
                'org_admin_user', 'org_admin_password')
            # The state prepends the org id, so both calls use '1-ak'.
            mocks['uyuni.activation_key_get_details'].assert_called_once_with(
                '1-ak',
                org_admin_user='org_admin_user',
                org_admin_password='org_admin_password')
            mocks['uyuni.activation_key_delete'].assert_called_once_with(
                '1-ak',
                org_admin_user='org_admin_user',
                org_admin_password='org_admin_password')
   070701000000EF000081B400000000000000000000000160C1E96E00007E8D000000000000000000000000000000000000002800000000susemanager-sls/susemanager-sls.changes   -------------------------------------------------------------------
Wed Jun 09 16:59:01 CEST 2021 - jgonzalez@suse.com

- version 4.1.28-1
- exclude openSUSE Leap 15.3 from product installation (bsc#1186858)

-------------------------------------------------------------------
Thu Jun 03 14:00:29 CEST 2021 - jgonzalez@suse.com

- version 4.1.27-1
- Enable certificate deployment for Leap 15.3 clients which is needed for
  bootstrapping (bsc#1186765)

-------------------------------------------------------------------
Fri May 21 17:26:20 CEST 2021 - jgonzalez@suse.com

- version 4.1.26-1
- fix installation of gnupg on Debian 10

-------------------------------------------------------------------
Wed May 19 16:34:19 CEST 2021 - jgonzalez@suse.com

- version 4.1.25-1
- Do not install python2-salt on Salt 3002.2 Docker build hosts (bsc#1185506)
- Add support for 'disable_local_repos' salt minion config parameter
  (bsc#1185568)

-------------------------------------------------------------------
Tue May 04 13:55:56 CEST 2021 - jgonzalez@suse.com

- version 4.1.24-1
- Fix insecure JMX configuration (bsc#1184617)
- Avoid conflicts with running ioloop on mgr_events engine (bsc#1172711)
- keep salt-minion when it is installed to prevent update problems with
  dependent packages not available in the bootstrap repo (bsc#1183573)

-------------------------------------------------------------------
Thu Apr 08 11:31:14 CEST 2021 - jgonzalez@suse.com

- version 4.1.23-1
- Require new kiwi-systemdeps packages (bsc#1184271)

-------------------------------------------------------------------
Mon Mar 29 12:05:00 CEST 2021 - jgonzalez@suse.com

- version 4.1.22-1
- Prevent useless package list refresh actions on zypper minions (bsc#1183661)
- Skip removed product classes with satellite-sync
- handle GPG keys when bootstrapping ssh minions (bsc#1181847)

-------------------------------------------------------------------
Mon Mar 01 13:24:57 CET 2021 - jgonzalez@suse.com

- version 4.1.21-1
- Ubuntu 18 has version of apt which does not correctly support
  auth.conf.d directory. Detect the working version and use this feature
  only when we have a higher version installed

-------------------------------------------------------------------
Fri Jan 22 13:19:00 CET 2021 - jgonzalez@suse.com

- version 4.1.20-1
- fix apt login for similar channel labels (bsc#1180803)

-------------------------------------------------------------------
Tue Jan 12 13:57:25 CET 2021 - jgonzalez@suse.com

- version 4.1.19-1
- Change behavior of mgrcompat wrapper after deprecation changes on Salt 3002
- Make autoinstallation provisioning compatible with GRUB and ELILO
  in addition to GRUB2 only (bsc#1164227)

-------------------------------------------------------------------
Mon Nov 23 15:32:19 CET 2020 - jgonzalez@suse.com

- version 4.1.18-1
- Fix: sync before start action chains (bsc#1177336)
- Temp: revert Sync state modules when starting action chain execution (bsc#1177336)
- Handle group- and org-specific image pillars
- use require in reboot trigger (bsc#1177767)
- add pillar option to get allowVendorChange option during dist upgrade
- Sync state modules when starting action chain execution (bsc#1177336)

-------------------------------------------------------------------
Mon Oct 26 12:02:08 CET 2020 - jgonzalez@suse.com

- version 4.1.17-1
- Fix grub2 autoinstall kernel path (bsc#1178060)

-------------------------------------------------------------------
Thu Oct 22 11:14:35 CEST 2020 - jgonzalez@suse.com

- version 4.1.16-1
- Fix action chain resuming when patches updating salt-minion don't cause service to be
  restarted (bsc#1144447)
- Make grub2 autoinstall kernel path relative to the boot partition root (bsc#1175876)
- Move channel token information from sources.list to auth.conf on Debian 10 and Ubuntu 18 and newer
- Add support for activation keys on server configuration Salt modules
- ensure the yum/dnf plugins are enabled
- Remove hostname from /var/lib/salt/.ssh/known_hosts when deleting system (bsc#1176159)

-------------------------------------------------------------------
Tue Sep 15 10:54:03 CEST 2020 - jgonzalez@suse.com

- version 4.1.15-1
- Add uyuni-config-modules subpackage with Salt modules to configure
  Servers
- Fix reporting of missing products in product.all_installed (bsc#1165829)

-------------------------------------------------------------------
Wed Aug 26 12:31:40 CEST 2020 - jgonzalez@suse.com

- version 4.1.14-1
- Fix the dnf plugin to add the token to the HTTP header (bsc#1175724)

-------------------------------------------------------------------
Wed Aug 12 10:58:00 CEST 2020 - jgonzalez@suse.com

- version 4.1.13-1
- Fix: supply a dnf base when dealing w/repos (bsc#1172504)
- Fix: autorefresh in repos is zypper-only
- Add virtual network state change state to handle start, stop and delete
- Add virtual network state change state to handle start and stop

-------------------------------------------------------------------
Thu Jul 23 13:41:10 CEST 2020 - jgonzalez@suse.com

- version 4.1.12-1
- fetch oracle-release when looking for RedHat Product Info (bsc#1173584)
- Force a refresh after deleting a virtual storage volume
- Prevent stuck Hardware Refresh actions on Salt 2016.11.10 based SSH minions (bsc#1173169)
- Require PyYAML version >= 5.1
- Log out of Docker registries after image build (bsc#1165572)
- Prevent "module.run" deprecation warnings by using custom mgrcompat module

-------------------------------------------------------------------
Wed Jul 01 16:13:07 CEST 2020 - jgonzalez@suse.com

- version 4.1.11-1
- Fix detection of CentOS systems to properly set bootstrap repo (bsc#1173556)
- Do not produce syntax error on custom ssh_agent Salt module when
  executing on Python 2 instance.

-------------------------------------------------------------------
Tue Jun 23 17:24:45 CEST 2020 - jgonzalez@suse.com

- version 4.1.10-1
- Remove VM disk type attribute
- Merge virtualization fragment into suma-minion pillar (bsc#1172962)

-------------------------------------------------------------------
Wed Jun 17 16:21:24 CEST 2020 - jgonzalez@suse.com

- version 4.1.9-1
- Add ssh_agent for CaaSP management

-------------------------------------------------------------------
Wed Jun 10 12:41:08 CEST 2020 - jgonzalez@suse.com

- version 4.1.8-1
- Avoid SSL certificate issue when bootstrapping OpenSUSE Leap 15.2 (bsc#1172712)
- Add Salt states for CaaSP cluster management
- Use minion fqdn instead of minion id as target in kiwi_collect_image
  runner. If fqdn is not present or is localhost, use minion ip as
  fallback (bsc#1170737)
- trust customer gpg key when metadata signing is enabled
- specify gpg key for RH systems in repo file (bsc#1172286)
- Implement CaaSP cluster upgrade procedure in cluster provider module.
- handle GPG check flags different for yum/dnf (bsc#1171859)
- Enable bootstrapping for Oracle Linux 6, 7 and 8
- Set YAML loader to fix deprecation warnings

-------------------------------------------------------------------
Wed May 20 11:06:24 CEST 2020 - jgonzalez@suse.com

- version 4.1.7-1
- Fix failing "Hardware Refresh" actions because wrong "instance_id" reported
  from minion due a captive portal on the network (bsc#1171491)
- Remove suseRegisterInfo package only if it's plain client (bsc#1171262)
- On Debian-like systems, install only required dependencies when installing salt
- Enable support for bootstrapping Ubuntu 20.04 LTS
- Pass image profile custom info values as Docker buildargs during image build
- Cluster Awareness: Introduce generic SLS files for Cluster Management
  and CaaSP Cluster Provider custom Salt module.
- Add virtual volume delete action
- Ubuntu no longer shows removed packages as installed (bsc#1171461)

-------------------------------------------------------------------
Mon Apr 13 09:37:50 CEST 2020 - jgonzalez@suse.com

- version 4.1.6-1
- Fix virt.deleted state dependency
- Make 'product' state module only available for minions with zypper >= 1.8.13 (bsc#1166699)
- Use saltutil states if available on the minion (bsc#1167556)
- Enable support for bootstrapping Astra Linux CE "Orel"
- remove key grains only when file and grain exists (bsc#1167237)
- Add virtual storage pool actions

-------------------------------------------------------------------
Thu Mar 19 12:17:47 CET 2020 - jgonzalez@suse.com

- version 4.1.5-1
- Enable support for bootstrapping Debian 9 and 10
- Adapt 'mgractionchains' module to work with Salt 3000

-------------------------------------------------------------------
Wed Mar 11 11:03:06 CET 2020 - jgonzalez@suse.com

- version 4.1.4-1
- cleanup key grains after usage
- Disable modularity failsafe mechanism for RHEL 8 repos (bsc#1164875)
- install dmidecode before HW profile update when missing
- Add mgr_start_event_grains.sls to update minion config
- Add 'product' custom state module to handle installation of
  SUSE products at client side (bsc#1157447)
- Support reading of pillar data for minions from multiple files (bsc#1158754)

-------------------------------------------------------------------
Mon Feb 17 12:56:29 CET 2020 - jgonzalez@suse.com

- version 4.1.3-1
- Do not workaround util.syncmodules for SSH minions (bsc#1162609)
- Force to run util.synccustomall when triggering action chains on SSH minions (bsc#1162683).
- Adapt sls file for pre-downloading in Ubuntu minions
- Add custom 'is_payg_instance' grain when instance is PAYG and not BYOS.

-------------------------------------------------------------------
Wed Jan 22 12:25:10 CET 2020 - jgonzalez@suse.com

- version 4.1.2-1
- Only install python2-salt on buildhosts if it is available
- sort formulas by execution order (bsc#1083326)
- split remove_traditional_stack into two parts. One for all systems and
  another for clients not being a Uyuni Server or Proxy (bsc#1121640)
- Change the order to check the version correctly for RES (bsc#1152795)
- Remove the virt-poller cache when applying Virtualization entitlement
- Force HTTP request timeout on public cloud grain (bsc#1157975)

-------------------------------------------------------------------
Wed Nov 27 17:08:25 CET 2019 - jgonzalez@suse.com

- version 4.1.1-1
- dockerhost: install python2 salt packages only when python2
  is available (bsc#1129627)
- Support license entry in kiwi image packages list
- Install yum plugin only for yum < 4 (bsc#1156173)
- Add self monitoring to Admin Monitoring UI (bsc#1143638)
- configure GPG keys and SSL Certificates for RHEL8 and ES8
- Always run Kiwi with empty cache (bsc#1155899)
- Do not show errors when polling internal metadata API (bsc#1155794)
- Avoid traceback error due lazy loading which_bin (bsc#1155794)
- Add missing "public_cloud" custom grain (bsc#1155656)
- Consider timeout value in salt remote script (bsc#1153181)
- Using new module path for which_bin to get rid of DeprecationWarning
- Fix: match `image_id` with newer k8s (bsc#1149741)
- Bump version to 4.1.0 (bsc#1154940)
- Always install latest available salt during bootstrap
- Create Kiwi cache dir if not present
- Require pmtools only for SLE11 i586 and x86_64 (bsc#1150314)
- do not break Servers registering to a Server
- Introduce dnf-susemanager-plugin for RHEL8 minions
- Provide custom grain to report "instance id" when running on Public Cloud instances
- enable Kiwi NG on SLE15
- disable legacy startup events for new minions
- implement provisioning for salt clients
- Bootstrapping RES6/RHEL6/SLE11 with TLS1.2 now shows error message. (bsc#1147126)
- Fix for issue with bootstrapping RES minions (bsc#1147126)
- dmidecode does not exist on ppc64le and s390x (bsc#1145119)
- update susemanager.conf to use adler32 for computing the server_id for new minions

-------------------------------------------------------------------
Wed Jul 31 17:42:04 CEST 2019 - jgonzalez@suse.com

- version 4.0.13-1
- Check for result of image rsync transfer to catch failures early (bsc#1104949)
- Force VM off before deleting it (bsc#1138127)
- Allow forcing off or resetting VMs
- Fix the indentation so that custom formulas can be read correctly (bsc#1136937)
- Make sure dmidecode is installed during bootstrap to ensure that hardware
  refresh works for all operating systems (bsc#1137952)
- Prevent stuck Actions when onboarding KVM host minions (bsc#1137888)
- Fix formula name encoding on Python 3 (bsc#1137533)
- Adapt tests for SUSE manager 4.0
- More thoroughly disable the Salt mine in util.mgr_mine_config_clean_up (bsc#1135075)

-------------------------------------------------------------------
Wed May 15 15:35:23 CEST 2019 - jgonzalez@suse.com

- version 4.0.12-1
- SPEC cleanup
- Enabling certificate deployment for Leap 15.1 clients which is
  needed for bootstrapping
- States to enable/disable server monitoring
- Improve salt events processing performance (bsc#1125097)

-------------------------------------------------------------------
Mon Apr 22 12:23:43 CEST 2019 - jgonzalez@suse.com

- version 4.0.11-1
- Enable SLES11 OS Image Build Host
- Add support for Salt batch execution mode
- Do not configure Salt Mine in newly registered minions (bsc#1122837)
- use default 'master' branch in OSImage profile URL (bsc#1108218)
- Add Python linting makefile and PyLint configuration file

-------------------------------------------------------------------
Thu Apr 04 14:43:04 CEST 2019 - jgonzalez@suse.com

- version 4.0.10-1
- Update get_kernel_live_version module to support older Salt versions (bsc#1131490)

-------------------------------------------------------------------
Fri Mar 29 10:37:42 CET 2019 - jgonzalez@suse.com

- version 4.0.9-1
- Update get_kernel_live_version module to support SLES 15 live patches
- Support register minion using bootstrap repos for 18.04 and 16.04.

-------------------------------------------------------------------
Mon Mar 25 17:04:34 CET 2019 - jgonzalez@suse.com

- version 4.0.8-1
- Fix Salt error related to remove_traditional_stack when bootstrapping an Ubuntu
  minion (bsc#1128724)
- Adapt disablelocalrepos.sls syntax for Salt 2016.10 (rhel6, sle11) (bsc#1127706)
- Automatically trust SUSE GPG key for client tools channels on Ubuntu systems
- util.systeminfo sls has been added to perform different actions at minion startup (bsc#1122381)

-------------------------------------------------------------------
Sat Mar 02 00:16:05 CET 2019 - jgonzalez@suse.com

- version 4.0.7-1
- Add support for Ubuntu minions
- Add Ubuntu SSL-Cert SLS-Files

-------------------------------------------------------------------
Wed Feb 27 13:17:30 CET 2019 - jgonzalez@suse.com

- version 4.0.6-1
- Fix mgr_events to use current ioloop (bsc#1126280)
- add states for virtual machine actions
- Added option to read 'pkg_download_point_...' pillar values and use it in repo url

-------------------------------------------------------------------
Thu Jan 31 09:45:42 CET 2019 - jgonzalez@suse.com

- version 4.0.5-1
- prevent the pkgset beacon from firing during onboarding (bsc#1122896)
- Prevent excessive DEBUG logging from mgr_events engine

-------------------------------------------------------------------
Wed Jan 16 12:27:07 CET 2019 - jgonzalez@suse.com

- version 4.0.4-1
- Allow bootstrapping minions with a pending minion key being present (bsc#1119727)

-------------------------------------------------------------------
Mon Dec 17 14:46:00 CET 2018 - jgonzalez@suse.com

- version 4.0.3-1
- enhance bootstrap-repo urls for Centos and Opensuse
- use a Salt engine to process return results (bsc#1099988)

-------------------------------------------------------------------
Fri Oct 26 10:52:53 CEST 2018 - jgonzalez@suse.com

- version 4.0.2-1
- deploy SSL certificate during onboarding of openSUSE Leap 15.0 (bsc#1112163)
- install all available known kiwi boot descriptions
- Fix: Cleanup Kiwi cache in highstate (bsc#1109892)
- removed the ssl certificate verification while checking bootstrap repo URL (bsc#1095220)
- Removed the need for curl to be present at bootstrap phase (bsc#1095220)
- Migrate Python code to be Python 2/3 compatible
- Fix merging of image pillars
- Fix: delete old custom OS images pillar before generation (bsc#1105107)
- Generate OS image pillars via Java
- Store activation key in the Kiwi built image
- Implement the 2-phase registration of saltbooted minions (SUMA for Retail)

-------------------------------------------------------------------
Fri Aug 10 15:45:45 CEST 2018 - jgonzalez@suse.com

- version 4.0.1-1
- Bump version to 4.0.0 (bsc#1104034)
- Fix copyright for the package specfile (bsc#1103696)
- Feat: add OS Image building with Kiwi FATE#322959 FATE#323057 FATE#323056
- Use custom Salt capabilities to prevent breaking backward compatibility (bsc#1096514)
- Update profileupdate.sls to report all versions installed (bsc#1089526)
- Do not install 'python-salt' on container build hosts with older Salt versions
  (bsc#1097699)
- Fix bootstrap error when removing traditional stack (bsc#1096009)

-------------------------------------------------------------------
Wed May 23 09:03:37 CEST 2018 - jgonzalez@suse.com

- version 3.2.13-1
- Changes to mgractionchains module in order to support action chains on
  minions using ssh-push connection method.
- Fix migration from traditional stack to salt registration (bsc#1093825)

-------------------------------------------------------------------
Wed May 16 17:38:30 CEST 2018 - jgonzalez@suse.com

- version 3.2.12-1
- Fix external pillar formula "ifempty" and "namespace" handling
- Fix profileupdate sls to execute retrieval of kernel live patching info (bsc#1091052)
- Use recursive merge on form pillars
- install python2/3 salt flavours on buildhosts to generate a compatible
  thin for the docker image being built (bsc#1092161)
- docker.login requires a list as input (bsc#1092161)

-------------------------------------------------------------------
Mon May 07 15:31:50 CEST 2018 - jgonzalez@suse.com

- version 3.2.11-1
- fix hardware refresh when FQDN changes (bsc#1073267)
- Handle empty values. Do not pass optional fields to pillar in
  formulas if field is empty and no ifempty attr defined.
- Fixed processing of formulas with $scope: group
- Preserve order of formulas (bsc#1083326)

-------------------------------------------------------------------
Wed Apr 25 12:13:25 CEST 2018 - jgonzalez@suse.com

- version 3.2.10-1
- create bootstrap repo only if it exists in the server (bsc#1087840)

-------------------------------------------------------------------
Mon Apr 23 09:26:09 CEST 2018 - jgonzalez@suse.com

- version 3.2.9-1
- Enqueue states applied from 'mgractionchains' to avoid failures when
  other states are already running at that time (bsc#1090502)

-------------------------------------------------------------------
Wed Apr 04 12:14:25 CEST 2018 - jgonzalez@suse.com

- version 3.2.8-1
- Fix 'mgractionchains.resume' output when nothing to resume (bsc#1087401)

-------------------------------------------------------------------
Thu Mar 29 01:28:50 CEST 2018 - jgonzalez@suse.com

- version 3.2.7-1
- Do not execute sumautil.get_kernel_live_version when inspecting an image

-------------------------------------------------------------------
Mon Mar 26 09:15:31 CEST 2018 - jgonzalez@suse.com

- version 3.2.6-1
- Provide new Salt module and Reactor to handle Action Chains on Minions
- use dockermod with new salt and user repository/tag option for build
- adapt names for gpg keys which have been changed
- perform docker login before building and inspecting images (bsc#1085635)

-------------------------------------------------------------------
Mon Mar 05 09:09:19 CET 2018 - jgonzalez@suse.com

- version 3.2.5-1
- support SLE15 product family

-------------------------------------------------------------------
Wed Feb 28 10:15:38 CET 2018 - jgonzalez@suse.com

- version 3.2.4-1
- Remove SUSE Manager repositories when deleting salt minions
  (bsc#1079847)
- Fix master tops merging when running salt>=2018

-------------------------------------------------------------------
Mon Feb 05 12:53:28 CET 2018 - jgonzalez@suse.com

- version 3.2.3-1
- Allow scheduling the change of software channel changes as an
  action. The previous channels remain accessible to the registered
  system until the action is executed.

-------------------------------------------------------------------
Fri Feb 02 13:06:31 CET 2018 - jgonzalez@suse.com

- version 3.2.2-1
- compare osmajorrelease in jinja always as integer

-------------------------------------------------------------------
Wed Jan 17 13:31:27 CET 2018 - jgonzalez@suse.com

- version 3.2.1-1
- addition of parameters to package manipulation states to improve
  SUSE Manager performance
- python3 compatibility fixes in modules and states
- Fix cleanup state error when deleting ssh-push minion (bsc#1070161)
- Fix image inspect when entrypoint is used by overwriting it
  (bsc#1070782)

-------------------------------------------------------------------
Tue Dec 12 12:05:09 CET 2017 - jgonzalez@suse.com

- version 3.1.13-1
- fix Salt version detection for patches (bsc#1072350)

-------------------------------------------------------------------
Wed Nov 29 10:15:59 CET 2017 - jgonzalez@suse.com

- version 3.1.12-1
- Fix cleanup state error when deleting ssh-push minion (bsc#1070161)

-------------------------------------------------------------------
Tue Nov 28 15:18:20 CET 2017 - jgonzalez@suse.com

- version 3.1.11-1
- Added state templates for deploying/comparing config channels for Salt
- Fix failing certs state for Tumbleweed (bsc#970630)
- Fix deprecated SLS files to avoid deprecation warnings during highstate (bsc#1041993)
- Support xccdf 1.2 namespace in openscap result file (bsc#1059319)
- ensure correct ordering of patches (bsc#1059801)
- fix create empty top.sls with no-op (bsc#1053038)
- Enabling certificate deployment for Leap 42.3 clients which is
  needed for bootstrapping
- fix Salt version detection for patches (bsc#1072350)

-------------------------------------------------------------------
Thu Sep 14 11:41:56 CEST 2017 - mc@suse.de

- version 3.1.10-1
- Kubernetes runner implementation
- addition of parameters to package manipulation states to improve
  SUSE Manager performance

-------------------------------------------------------------------
Fri Jul 21 12:02:24 CEST 2017 - mc@suse.de

- version 3.1.9-1
- disable gpgcheck for bootstrap repo to work with new libzypp (bsc#1049670)
- Remove spacewalk:* repos when removing traditional stack (bsc#1024267)
- susemanager-sls: fix certs state for Tumbleweed (bsc970630)
- susemanager-sls: fix certs state for Leap 42.2 (bsc970630)
- Make sumautil.get_kernel_live_version accept any kgr output 'active: NUM'
  where NUM > 0 (bsc#1044074)

-------------------------------------------------------------------
Mon Jun 19 16:37:53 CEST 2017 - mc@suse.de

- version 3.1.8-1
- Avoids formula leaking on pillar data (bsc#1044236)

-------------------------------------------------------------------
Mon May 29 15:53:51 CEST 2017 - mc@suse.de

- version 3.1.7-1
- fix yum plugin when installing patches on RHEL6 (bsc#1039294)
- Remove suseRegisterInfo in a separate yum transaction so that
  it's not called by yum plugin (bsc#1038732)
- Refactoring formulas in suma_minion external pillar (bsc#1033825)
- configure mime also during bootstrapping
- add missing file name attr to yum plugin state
- Encode formula to str (bsc#1033825)
- update yum on RedHat like systems
- update basic packages when bootstrapping with salt
- use include instead of state.apply channels to fix salt-ssh issue
  (bsc#1036268)

-------------------------------------------------------------------
Wed May 03 15:55:46 CEST 2017 - michele.bologna@suse.com

- version 3.1.6-1
- Targeting patches instead of packages for non Zypper patch installation
- add certificate state for CAASP
- add certificate state for SLES for SAP (bsc#1031659)

-------------------------------------------------------------------
Mon Apr 03 14:47:46 CEST 2017 - mc@suse.de

- version 3.1.5-1
- patch application pre-download
- pre-download packages scheduled for install

-------------------------------------------------------------------
Fri Mar 31 09:48:52 CEST 2017 - mc@suse.de

- version 3.1.4-1
- Fix mainframesysinfo module to use /proc/sysinfo on SLES11
  (bsc#1025758)
- take care that container and images are removed after inspect
- add name to Bootstrap repo
- Pre-create empty top.sls with no-op (bsc#1017754)
- create a random container name
- Fix pkgset beacon (bsc#1029350)
- set minion own key owner to bootstrap ssh_push_sudo_user
- runner to generate ssh key and execute cmd via proxies
- change ssh bootstrap state to generate and auth keys for
  salt-ssh push with tunnel

-------------------------------------------------------------------
Tue Mar 07 14:55:32 CET 2017 - mc@suse.de

- version 3.1.3-1
- add xccdf result xslt
- move move_minion_uploaded_files runner
- call docker inspect for additional data
- remove the container after inspecting it
- do not call image profile automatically after build
- Add state for image profileupdate
- add SUSE Manager prefix to state ids

-------------------------------------------------------------------
Tue Feb 07 15:12:30 CET 2017 - michele.bologna@suse.com

- version 3.1.2-1
- Configure mine.update to submit a job return event (bsc#1022735)
- Disable spacewalksd and spacewalk-update-status when switching to salt
  registration (bsc#1020902)
- Fix timezone handling for rpm installtime (bsc#1017078)
- Push build images into registry
- Configure a Docker build host
- Salt version update

-------------------------------------------------------------------
Wed Jan 11 16:57:58 CET 2017 - michele.bologna@suse.com

- version 3.1.1-1
- Version bump to 3.1

-------------------------------------------------------------------
Fri Dec 16 12:14:52 CET 2016 - michele.bologna@suse.com

- version 0.1.18-1
- Rename 'master' pillar to 'mgr_server'
- Add tunneling to salt-ssh support
- Provide SUMA static pillar data for unregistered minions (bsc#1015122)
- implement fetching kernel live version as module (FATE#319519)
- Removing '/usr/share/susemanager/pillar' path
- Retrieving SUMA static pillar data from ext_pillar (bsc#1010674)
- Bugfix: Prevent salt-master ERROR messages if formulas files are missing
  (bsc#1009004)
- fallback to major os release version for cert names (bsc#1009749)

-------------------------------------------------------------------
Mon Nov 07 11:37:52 CET 2016 - michele.bologna@suse.com

- version 0.1.17-1
- Sync custom modules,grains,beacons always before pkg and hw profileupdate
  (bsc#1004725)
- Write distupgrade state for SP migration via salt
- New location of the salt-ssh key/cert pair. The previous location wasn't
  writable by the salt user

-------------------------------------------------------------------
Thu Oct 13 12:50:28 CEST 2016 - mc@suse.de

- version 0.1.16-1
- Only normalize lists (bsc#1004456)
- Call normalize() before add_scsi_info() (bsc#1004456)

-------------------------------------------------------------------
Thu Oct 06 14:51:43 CEST 2016 - mc@suse.de

- version 0.1.15-1
- Fixed bug with numbers in FormulaForm and improved ext_pillar script
- Added formula directories and formulas.sls to setup script
- External pillar script now also includes formula pillars
- Rename symlinks according to changed 'os' grain for Expanded Support
- Adding certs states for RHEL minion based on SLES-ES
- Rename udevdb scsi info json key
- Add support for mapping mainframe sysinfo
- Implement isX86() in jinja more correctly
- Initial support for querying and saving DMI info
- Add support for mapping the devices
- Actually handle incoming hardware details
- Initial version of the hardware.profileupdate sls
- Added pkgset beacon support in susemanager yum plugin
- trust also RES GPG key on all RedHat minions
- trust GPG keys for SUSE Manager Tools channel on RES
- configure bootstrap repository for RES
- Always enable salt-minion service while bootstrapping (bsc#990202)
- CentOS cert state symlinks and fixes
- states for installing certificate on redhat minions
- pkg.list_products only on Suse
- yum plugin to add jwt token as http header
- Generate SLE 12 bootstrap repo path correctly (bsc#994578)
- Merging top.sls files in base env (bsc#986770)
- Watch files instead of require

-------------------------------------------------------------------
Mon Jul 18 14:23:32 CEST 2016 - jrenner@suse.com

- version 0.1.14-1
- Initial version of the bootstrap sls file
- update trust store when multiple certs in one file are available on SLE11
- update ca certificates only when they have changed
- assume no pillar data if the yml file for the minion does not exist
  (bsc#980354)
- Add distributable pkgset beacon for RPM database notifications

-------------------------------------------------------------------
Tue May 24 16:04:20 CEST 2016 - kwalter@suse.com

- version 0.1.13-1
- require refresh channels before pkg states (bsc#975424)
- use pillar and static states to install/remove packages (bsc#975424)

-------------------------------------------------------------------
Tue Apr 12 17:15:01 CEST 2016 - mc@suse.de

- version 0.1.12-1
- Add external pillar minion data resolver (bsc#974853)
- Add readme about ext_pillars
- remove pillar top.sls (bsc#974853)

-------------------------------------------------------------------
Wed Apr 06 08:46:20 CEST 2016 - mc@suse.de

- version 0.1.11-1
- generate include only if group_ids not empty
- use state names in custom_groups (bsc#973452)
- rename pillar group_id to group_ids
- Fix generating blank repositories because hitting salt file list cache
  (bsc#971004)
- package pillar/top.sls (bsc#973569)
- pre require coreutils to create the cert symlink in post (bsc#972160)
- disable local repositories on registration (bnc#971788)

-------------------------------------------------------------------
Mon Mar 21 17:38:33 CET 2016 - mc@suse.de

- version 0.1.10-1
- remove unused ext_pillar
- ignore missing .sls to include in certs/init.sls
- ignore packages_{machine_id}.sls if it's missing
- ignore missing pillar files at minion level
- ignore missing sls or pillars in custom_XXX/init.sls
  (bnc#970461, bnc#970316)
- Include minion custom_<machine_id>.sls only if it exists (#bnc970461)
- Ignore missing org custom state (#bnc970461)
- refactor in python (#bnc970316) (#bnc970461)

-------------------------------------------------------------------
Wed Mar 09 11:29:45 CET 2016 - mc@suse.de

- version 0.1.9-1
- include org and groups separately in top.sls
- refresh pillar on remove from group
- initial suma groups external pillar

-------------------------------------------------------------------
Wed Mar 02 12:09:13 CET 2016 - mc@suse.de

- version 0.1.8-1
- rename tables

-------------------------------------------------------------------
Tue Jan 26 14:07:41 CET 2016 - mc@suse.de

- version 0.1.7-1
- cleanup python code according to PR review
- reworked sumautil network utils to be more pythonic
- remove commented code
- get network if modules, checkstyle cleanup
- get minion primary ips

-------------------------------------------------------------------
Sat Jan 16 11:38:17 CET 2016 - mc@suse.de

- version 0.1.6-1
- custom grain for total num of cpus

-------------------------------------------------------------------
Thu Jan 14 13:30:59 CET 2016 - mc@suse.de

- version 0.1.5-1
- Port client python HW handling to server side java
- CPU socket count: try also lscpu and dmidecode

-------------------------------------------------------------------
Tue Jan 05 15:55:57 CET 2016 - mc@suse.de

- version 0.1.4-1
- Fill General and DMI hw info on minion registration

-------------------------------------------------------------------
Wed Dec 16 11:28:21 CET 2015 - mc@suse.de

- version 0.1.3-1
- Add static sls for package management

-------------------------------------------------------------------
Mon Nov 30 11:15:47 CET 2015 - mc@suse.de

- version 0.1.2-1
- force link creation
- use osfullname instead of os
- Cover sles12 machines reporting os grain SUSE
- Add support for deploying certificates to SLES11 minions

-------------------------------------------------------------------
Tue Nov 17 09:35:38 CET 2015 - jrenner@suse.com

- version 0.1.1-1
- Initial package release
   070701000000F0000081B400000000000000000000000160C1E96E00001F97000000000000000000000000000000000000002500000000susemanager-sls/susemanager-sls.spec  #
# spec file for package susemanager-sls
#
# Copyright (c) 2019 SUSE LINUX GmbH, Nuernberg, Germany.
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
# upon. The license for this file, and modifications and additions to the
# file, is the same license as for the pristine package itself (unless the
# license for the pristine package is not an Open Source License, in which
# case the license is the MIT License). An "Open Source License" is a
# license that conforms to the Open Source Definition (Version 1.9)
# published by the Open Source Initiative.

# Please submit bugfixes or comments via https://bugs.opensuse.org/
#

%if 0%{?suse_version} > 1320
# SLE15 builds on Python 3
%global build_py3   1
%endif

Name:           susemanager-sls
Version:        4.1.28
Release:        1
Summary:        Static Salt state files for SUSE Manager
License:        GPL-2.0-only
Group:          Applications/Internet
Source:         %{name}-%{version}.tar.gz
Requires(pre):  coreutils
Requires:       susemanager-build-keys-web >= 12.0.1
%if 0%{?build_py3}
BuildRequires:  python3-pytest
BuildRequires:  python3-mock
BuildRequires:  python3-salt
Requires:       python3-PyYAML >= 5.1
%else
BuildRequires:  python-pytest
BuildRequires:  python-mock
BuildRequires:  python-salt
Requires:       python-PyYAML >= 5.1
%endif
BuildRoot:      %{_tmppath}/%{name}-%{version}-build
BuildArch:      noarch

%description
Static Salt state files for SUSE Manager, where generic operations are
provided for the integration between infrastructure components.

%package -n uyuni-config-modules
Summary:        Salt modules to configure a Server
Group:          Applications/Internet

%description -n uyuni-config-modules
This package contains Salt execution and state modules that can be used
to configure a SUSE Manager or Uyuni Server.

%prep
%setup -q

%build

%install
# Create the full directory layout under the buildroot: Salt sync dirs
# (_grains/_beacons/_modules/_states), server-side Salt extension modules,
# pillar data, formulas, reactor and SCAP helpers, plus the system-wide
# formula metadata directory consumed at runtime (/srv/formula_metadata).
mkdir -p %{buildroot}/usr/share/susemanager/salt/_grains
mkdir -p %{buildroot}/usr/share/susemanager/salt/_beacons
mkdir -p %{buildroot}/usr/share/susemanager/salt/_modules
mkdir -p %{buildroot}/usr/share/susemanager/salt/_states
mkdir -p %{buildroot}/usr/share/susemanager/modules/pillar
mkdir -p %{buildroot}/usr/share/susemanager/modules/tops
mkdir -p %{buildroot}/usr/share/susemanager/modules/runners
mkdir -p %{buildroot}/usr/share/susemanager/modules/engines
mkdir -p %{buildroot}/usr/share/susemanager/pillar_data
mkdir -p %{buildroot}/usr/share/susemanager/formulas
mkdir -p %{buildroot}/usr/share/susemanager/formulas/metadata
mkdir -p %{buildroot}/usr/share/susemanager/reactor
mkdir -p %{buildroot}/usr/share/susemanager/scap
mkdir -p %{buildroot}/srv/formula_metadata
# Copy the static content trees verbatim from the source tarball.
cp -R salt/* %{buildroot}/usr/share/susemanager/salt
cp -R modules/pillar/* %{buildroot}/usr/share/susemanager/modules/pillar
cp -R modules/tops/* %{buildroot}/usr/share/susemanager/modules/tops
cp -R modules/runners/* %{buildroot}/usr/share/susemanager/modules/runners
cp -R modules/engines/* %{buildroot}/usr/share/susemanager/modules/engines
cp -R pillar_data/* %{buildroot}/usr/share/susemanager/pillar_data
cp -R formulas/* %{buildroot}/usr/share/susemanager/formulas
cp -R formula_metadata/* %{buildroot}/srv/formula_metadata
cp -R reactor/* %{buildroot}/usr/share/susemanager/reactor
cp -R scap/* %{buildroot}/usr/share/susemanager/scap

# Manually install Python part to already prepared structure
# (these land in the Salt sync dirs so minions receive them via saltutil.sync_*)
cp src/beacons/pkgset.py %{buildroot}/usr/share/susemanager/salt/_beacons
cp src/beacons/virtpoller.py %{buildroot}/usr/share/susemanager/salt/_beacons
cp src/grains/cpuinfo.py %{buildroot}/usr/share/susemanager/salt/_grains/
cp src/grains/public_cloud.py %{buildroot}/usr/share/susemanager/salt/_grains/
cp src/modules/sumautil.py %{buildroot}/usr/share/susemanager/salt/_modules
cp src/modules/mainframesysinfo.py %{buildroot}/usr/share/susemanager/salt/_modules
cp src/modules/udevdb.py %{buildroot}/usr/share/susemanager/salt/_modules
cp src/modules/mgractionchains.py %{buildroot}/usr/share/susemanager/salt/_modules
cp src/modules/kiwi_info.py %{buildroot}/usr/share/susemanager/salt/_modules
cp src/modules/kiwi_source.py %{buildroot}/usr/share/susemanager/salt/_modules
cp src/modules/mgrclusters.py %{buildroot}/usr/share/susemanager/salt/_modules
cp src/modules/mgr_caasp_manager.py %{buildroot}/usr/share/susemanager/salt/_modules
cp src/modules/ssh_agent.py %{buildroot}/usr/share/susemanager/salt/_modules
cp src/modules/uyuni_config.py %{buildroot}/usr/share/susemanager/salt/_modules
cp src/states/product.py %{buildroot}/usr/share/susemanager/salt/_states
cp src/states/mgrcompat.py %{buildroot}/usr/share/susemanager/salt/_states
cp src/states/uyuni_config.py %{buildroot}/usr/share/susemanager/salt/_states

# Install doc, examples (shipped by the uyuni-config-modules subpackage)
mkdir -p %{buildroot}/usr/share/doc/packages/uyuni-config-modules/examples/ldap
cp src/doc/* %{buildroot}/usr/share/doc/packages/uyuni-config-modules/
cp src/examples/uyuni_config_hardcode.sls %{buildroot}/usr/share/doc/packages/uyuni-config-modules/examples
cp src/examples/ldap/* %{buildroot}/usr/share/doc/packages/uyuni-config-modules/examples/ldap

%check
# Run the external-pillar tests and the module/state unit test suite at
# build time; rpmbuild aborts the build if either suite fails.
cd test
py.test test_pillar_suma_minion.py
cd ../src/tests
py.test

# Check that SLS files don't contain any call to "module.run" which has
# been replaced by "mgrcompat.module_run" calls.
! grep --include "*.sls" -r "module\.run" %{buildroot}/usr/share/susemanager/salt || exit 1

%post
# HACK! Create a symlink that may dangle at first; it becomes valid once the
# real certificate file is deployed to /srv/www/htdocs/pub (errors ignored).
ln -sf /srv/www/htdocs/pub/RHN-ORG-TRUSTED-SSL-CERT \
   /usr/share/susemanager/salt/certs/RHN-ORG-TRUSTED-SSL-CERT 2>&1 ||:
# Pre-create top.sls to suppress empty/absent top.sls warning/error (bsc#1017754)
USERLAND="/srv/salt"
TOP="$USERLAND/top.sls"
# Only seed top.sls when /srv/salt exists and no top.sls is present yet, so an
# admin-maintained top.sls is never overwritten.
if [ -d "$USERLAND" ]; then
    if [ ! -f "$TOP" ]; then
	cat <<EOF >> $TOP
# This only calls no-op statement from
# /usr/share/susemanager/salt/util/noop.sls state
# Feel free to change it.

base:
  '*':
    - util.noop
EOF
    fi
fi
# Restrict Java RMI to localhost (bsc#1184617)
# Args: $1 = JMX exporter systemd unit, $2 = sysconfig file to patch.
# Inserts jmxremote.host=localhost (before the port option) and forces
# rmi.server.hostname=localhost in JAVA_OPTS, but only when the exporter unit
# is enabled and the host option is not already present in the file.
restrict_to_localhost()
{
  JMXREMOTE_HOST='-Dcom.sun.management.jmxremote.host='
  JMXREMOTE_PORT='-Dcom.sun.management.jmxremote.port='
  RMI_SERVER_HOSTNAME='-Djava.rmi.server.hostname='
  systemctl is-enabled ${1} > /dev/null 2>&1
  jmx_exporter_enabled=$?
  grep -q -- $JMXREMOTE_HOST ${2}
  jmxremote_host_configured=$?
  # is-enabled returns 0 for enabled; grep returns 1 when the option is absent.
  if [ $jmx_exporter_enabled -eq 0 ] && [ $jmxremote_host_configured -eq 1 ]; then
    sed -ri "s/JAVA_OPTS=\"(.*)${JMXREMOTE_PORT}(.*)\"/JAVA_OPTS=\"\1${JMXREMOTE_HOST}localhost\ ${JMXREMOTE_PORT}\2\"/" ${2}
    sed -ri "s/JAVA_OPTS=\"(.*)${RMI_SERVER_HOSTNAME}\S*(.*)\"/JAVA_OPTS=\"\1${RMI_SERVER_HOSTNAME}localhost\2\"/" ${2}
  fi
}
tomcat_config=/etc/sysconfig/tomcat
taskomatic_config=/etc/rhn/taskomatic.conf
# $1 > 1 in %post means package upgrade (not fresh install): only patch
# configuration files that already exist on the system.
if [ $1 -gt 1 ] && [ -e $tomcat_config ]; then
  restrict_to_localhost prometheus-jmx_exporter@tomcat.service $tomcat_config
fi
if [ $1 -gt 1 ] && [ -e $taskomatic_config ]; then
  restrict_to_localhost prometheus-jmx_exporter@taskomatic.service $taskomatic_config
fi

%files
%defattr(-,root,root)
%dir /usr/share/susemanager
/usr/share/susemanager/salt
/usr/share/susemanager/pillar_data
/usr/share/susemanager/modules
/usr/share/susemanager/modules/pillar
/usr/share/susemanager/modules/tops
/usr/share/susemanager/modules/runners
/usr/share/susemanager/modules/engines
/usr/share/susemanager/formulas
/usr/share/susemanager/reactor
/usr/share/susemanager/scap
/srv/formula_metadata
%exclude /usr/share/susemanager/salt/_modules/uyuni_config.py
%exclude /usr/share/susemanager/salt/_states/uyuni_config.py
%ghost /usr/share/susemanager/salt/certs/RHN-ORG-TRUSTED-SSL-CERT

%files -n uyuni-config-modules
%defattr(-,root,root)
%dir /usr/share/susemanager
/usr/share/susemanager/salt/_modules/uyuni_config.py
/usr/share/susemanager/salt/_states/uyuni_config.py
%dir /usr/share/doc/packages/uyuni-config-modules
%doc /usr/share/doc/packages/uyuni-config-modules/*
%doc /usr/share/doc/packages/uyuni-config-modules/examples/*
%doc /usr/share/doc/packages/uyuni-config-modules/examples/ldap/*

%changelog
 070701000000F1000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001500000000susemanager-sls/test  070701000000F2000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000001A00000000susemanager-sls/test/data 070701000000F3000081B400000000000000000000000160C1E96E000000B6000000000000000000000000000000000000002D00000000susemanager-sls/test/data/formula_order.json  ["branch-network","cpu-mitigations","dhcpd","grafana","image-synchronize","locale","prometheus","prometheus-exporters","pxe","saltboot","tftpd","virtualization-host","vsftpd","bind"]  070701000000F4000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000002300000000susemanager-sls/test/data/formulas    070701000000F5000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000002C00000000susemanager-sls/test/data/formulas/metadata   070701000000F6000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000003100000000susemanager-sls/test/data/formulas/metadata/bind  070701000000F7000081B400000000000000000000000160C1E96E00000A23000000000000000000000000000000000000003A00000000susemanager-sls/test/data/formulas/metadata/bind/form.yml bind:
  $type: hidden-group

  config:
    $type: group
    options:
      $type: edit-group
      $optional: True
      $prototype:
        $type: text
        $key:
          $type: text
          $name: Option
    include_forwarders:
          $type: boolean
          $default: false

  configured_zones:
    $type: edit-group
    $minItems: 1
    $itemName: Zone ${i}
    $prototype:
      $type: group
      $key:
        $type: text
        $name: Name
      type:
        $type:  select
        $values: ["master", "slave"]
        $default: master
      notify:
        $type: boolean
        $default: False

  available_zones:
    $type: edit-group
    $minItems: 1
    $itemName: Zone ${i}
    $prototype:
      $type: group
      $key:
        $type: text
        $name: Name
      file:
        $type: text
      soa:
        $name: SOA
        $type: group
        ns:
          $name: NS
          $type: text
          $placeholder: ns@zone
          $ifEmpty: ns
        contact:
          $type: text
          $placeholder: admin@domain
          $ifEmpty: root@localhost
        serial:
          $default:  auto
          $ifEmpty:  auto
        class:
          $default:  IN
        refresh:
          $default:  8600
          $type: number
        retry:
          $default:  900
          $type: number
        expiry:
          $default:  86000
          $type: number
        nxdomain:
          $name: NXDOMAIN
          $default:  500
          $type: number
        ttl:
          $name: TTL
          $default:  8600
          $type: number
      records:
        $type: group
        A: 
          $type: edit-group
          $optional: true
          $minItems: 0
          $prototype:
            $key:
              $type: text
              $name: Hostname
            $type: text
            $name: IP address
        NS:
          $name: NS
          $type: group
          $optional:  true
          '@':
             $type: edit-group
             $minItems: 0
             $prototype:
               $type: text
        CNAME:
          $name: CNAME
          $type: edit-group
          $optional:  true
          $minItems: 0
          $prototype:
            $key:
              $type: text
              $name: Alias
            $type: text
            $name: Hostname
      generate_reverse: 
        $type: group
        $optional:  true
        net:
          $name: Network
          $optional:  true
        for_zones:
          $type: edit-group
          $optional:  true
          $minItems: 0
          $prototype:
            $type: text
 070701000000F8000081B400000000000000000000000160C1E96E00000069000000000000000000000000000000000000003E00000000susemanager-sls/test/data/formulas/metadata/bind/metadata.yml description:
  Settings for bind nameserver
group: general_system_configuration
after:
  - branch-network   070701000000F9000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000003B00000000susemanager-sls/test/data/formulas/metadata/branch-network    070701000000FA000081B400000000000000000000000160C1E96E0000093C000000000000000000000000000000000000004400000000susemanager-sls/test/data/formulas/metadata/branch-network/form.yml   branch_network:
  $type: hidden-group
  dedicated_NIC:
    $type: boolean
    $default: True

  nic:
    $default: eth1
    $visibleIf: .dedicated_NIC == true
  ip:
    $default: 192.168.128.1
    $visibleIf: .dedicated_NIC == true
  netmask:
    $default: 255.255.255.0
    $visibleIf: .dedicated_NIC == true

  configure_firewall:
    $type: boolean
    $default: true
    $help: Uncheck to configure firewall manually.

  firewall:
    $type: group
    $visibleIf: .configure_firewall == true
    enable_route:
      $type: boolean
      $default: True
      $visibleIf: ..dedicated_NIC == true
    enable_NAT:
      $type: boolean
      $default: True
      $visibleIf: ..dedicated_NIC == true
    enable_SLAAC_with_routing:
      $type: boolean
      $default: False
      $visibleIf: .enable_NAT == true
      $name: Force enable IPv6 SLAAC together with forwarding
      $help: Check to enable IPv6 autoconfiguration (SLAAC) even when Branch act as a router.
    open_dhcp_port:
      $visibleIf: ..dedicated_NIC == false
      $type: boolean
      $default: True
    open_dns_port:
      $visibleIf: ..dedicated_NIC == false
      $type: boolean
      $default: True
    open_tftp_port:
      $visibleIf: ..dedicated_NIC == false
      $type: boolean
      $default: True
    open_ftp_port:
      $visibleIf: ..dedicated_NIC == false
      $type: boolean
      $default: True
    open_http_port:
      $visibleIf: ..dedicated_NIC == false
      $type: boolean
      $default: True
    open_https_port:
      $visibleIf: ..dedicated_NIC == false
      $type: boolean
      $default: True
    open_salt_ports:
      $visibleIf: ..dedicated_NIC == false
      $type: boolean
      $default: True
    open_ssh_port:
      $type: boolean
      $default: True
    open_xmpp_server_port:
      $type: boolean
      $default: True
    open_xmpp_client_port:
      $type: boolean
      $default: True

  forwarder:
    $type: select
    $values:
      - resolver
      - bind
      - dnsmasq
    $default: bind

  forwarder_fallback:
    $type: boolean
    $default: True

  srv_directory:
    $name:  'server directory'
    $type: text
    $default: '/srv/saltboot'
  srv_directory_user:
    $name: 'server directory user'
    $type: text
    $default: 'saltboot'
  srv_directory_group:
    $name: 'server directory group'
    $type: text
    $default: 'saltboot'
070701000000FB000081B400000000000000000000000160C1E96E0000005C000000000000000000000000000000000000004800000000susemanager-sls/test/data/formulas/metadata/branch-network/metadata.yml   description:
  Configuration of Branch Server proxy networks
group: SUSE_manager_for_retail
070701000000FC000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000003C00000000susemanager-sls/test/data/formulas/metadata/cpu-mitigations   070701000000FD000081B400000000000000000000000160C1E96E000000BA000000000000000000000000000000000000004500000000susemanager-sls/test/data/formulas/metadata/cpu-mitigations/form.yml  mitigations:
  $type: group

  name:
    $type: select
    $values: ["Auto",
              "Auto + No SMT",
              "Off",
              "Manual"
             ]
    $default: Auto
  070701000000FE000081B400000000000000000000000160C1E96E00000063000000000000000000000000000000000000004900000000susemanager-sls/test/data/formulas/metadata/cpu-mitigations/metadata.yml  description:
  Settings for kernel options for performance/security.
group: security_configuration
 070701000000FF000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000003200000000susemanager-sls/test/data/formulas/metadata/dhcpd 07070100000100000081B400000000000000000000000160C1E96E00001284000000000000000000000000000000000000003B00000000susemanager-sls/test/data/formulas/metadata/dhcpd/form.yml    dhcpd:
  $type: namespace
  domain_name:
    $placeholder: Enter domain name for managed LAN
  domain_name_servers:
    $type: edit-group
    $minItems: 1
    $prototype:
      $type: text
  listen_interfaces:
    $type: edit-group
    $minItems: 1
    $prototype:
      $type: text
    $help: List of interfaces to listen on
    $default:
    - eth1
  authoritative:
    $type: boolean
    $default: True
  max_lease_time:
    $default: 20001
    $type: number
  default_lease_time:
    $default: 20000
    $type: number
  subnets:
    $type: edit-group
    $minItems: 1
    $name: Network Configuration (subnet)
    $itemName: Network ${i}
    $prototype:
        $type: group
        $key:
          $type: text
          $name: Network IP
          $default: 192.168.1.0
        netmask:
          $type: text
          $default: 255.255.255.0
        domain_name:
          $type: text
          $optional: true
        comment:
          $type: text
          $optional: true
        range:
          $type: edit-group
          $name: Dynamic IP Range
          $minItems: 2
          $maxItems: 2
          $prototype:
            $type: text
          $default:
          - 192.168.1.51
          - 192.168.1.151
        broadcast_address:
          $type: text
          $default: 192.168.1.255
        routers:
          $type: edit-group
          $minItems: 1
          $prototype:
            $type: text
          $default:
          - 192.168.1.1
        next_server:
          $type: text
          $default: 192.168.1.1
          $help: IP address or hostname of the server from which the initial boot file (specified in the filename statement) is to be loaded
          $optional: true
        filename:
          $type: text
          $visibleIf: .next_server != ''
          $default: boot/pxelinux.0
          $help: Specify the name of the initial boot file which is to be loaded by a client
          $optional: true
        filename_efi:
          $type: text
          $visibleIf: .next_server != ''
          $default: boot/grub.efi
          $help: Specify the name of the initial boot file which is to be loaded by a client in EFI mode
          $optional: true
        hosts:
          $type: edit-group
          $minItems: 0
          $itemName: Host ${i}
          $name: Hosts with Static IP Addresses (with Defaults from Subnet)
          $optional: true
          $prototype:
            $key:
                $type: text
                $name: Hostname
            fixed_address:
                $type: text
                $optional: true
                $name: IP Address
            hardware:
                $type: text
                $name: Hardware Type and Address
                $placeholder: Enter hardware-type hardware-address (e.g. "ethernet AA:BB:CC:DD:EE:FF")
                $help: Hardware Identifier - ethernet prefix is mandatory
            next_server:
                $type: text
                $default:
                $help: IP address or hostname of the server from which the initial boot file (specified in the filename statement) is to be loaded
                $optional: true
            filename:
                $type: text
                $visibleIf: .next_server != ''
                $default:
                $help: Specify the name of the initial boot file which is to be loaded by a client
                $optional: true
            filename_efi:
                $type: text
                $visibleIf: .next_server != ''
                $default:
                $help: Specify the name of the initial boot file which is to be loaded by a client in EFI mode
                $optional: true
            comment:
                $type: text
  hosts:
    $type: edit-group
    $minItems: 0
    $itemName: Host ${i}
    $name: Hosts with static IP addresses (with global defaults)
    $optional: true
    $prototype:
      $key:
        $type: text
        $name: Hostname
      fixed_address:
        $type: text
        $optional: true
        $name: IP address
      hardware:
        $type: text
        $name: Hardware Type and Address
        $placeholder: Enter hardware-type hardware-address (e.g. "ethernet AA:BB:CC:DD:EE:FF")
        $help: Hardware Identifier - ethernet prefix is mandatory
      next_server:
        $type: text
        $default:
        $help: IP address or hostname of the server from which the initial boot file (specified in the filename statement) is to be loaded
        $optional: true
      filename:
        $type: text
        $visibleIf: .next_server != ''
        $default:
        $help: Specify the name of the initial boot file which is to be loaded by a client
        $optional: true
      comment:
        $type: text
07070100000101000081B400000000000000000000000160C1E96E00000065000000000000000000000000000000000000003F00000000susemanager-sls/test/data/formulas/metadata/dhcpd/metadata.yml    description:
  Settings for DHCP server
group: general_system_configuration
after:
  - branch-network   07070100000102000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000003400000000susemanager-sls/test/data/formulas/metadata/grafana   07070100000103000081B400000000000000000000000160C1E96E0000074B000000000000000000000000000000000000003D00000000susemanager-sls/test/data/formulas/metadata/grafana/form.yml  grafana:
  $type: namespace

  enabled:
    $type: boolean
    $default: True
    $help: disasbled grafana

  admin_user:
    $type: text
    $name: Default admin user
    $required: true
    $disabled: "!formValues.grafana.enabled"
    
  admin_pass:
    $type: password
    $name: Default admin password  
    $required: true
    $disabled: "!formValues.grafana.enabled"

  datasources:
    $type: group
    $disabled: "!formValues.grafana.enabled"
    $help: Configure the data sources used by Grafana.

    prometheus:
      $type: edit-group
      $minItems: 1
      $name: Prometheus
      $help: Configure Prometheus data sources.
      $itemName: Prometheus data source ${i}
      $prototype:
        $type: group
        $disabled: "!formValues.grafana.enabled"
        $key:
          $type: text
          $name: Datasource name
          $default: Prometheus
          $help: Data source name
        url:
          $type: url
          $default: http://localhost:9080
          $required: true
          $name: Prometheus URL
          $help: URL of a Prometheus instance

  dashboards:
    $type: group
    $disabled: "!formValues.grafana.enabled"
    $help: Dashboards to install.

    add_uyuni_dashboard:
      $type: boolean
      $name: Uyuni server dashboard
      $help: Add dashboard for monitoring an Uyuni server
      $default: True

    add_uyuni_clients_dashboard:
      $type: boolean
      $name: Uyuni clients dashboard
      $help: Add dashboard for monitoring Uyuni clients
      $default: True

    add_postgresql_dasboard:
      $type: boolean
      $name: PostgreSQL dashboard
      $help: Add dashboard for monitoring a PostgreSQL database
      $default: True

    add_apache_dashboard:
      $type: boolean
      $name: Apache HTTPD dashboard
      $help: Add dashboard for monitoring an Apache HTTPD server
      $default: True
 07070100000104000081B400000000000000000000000160C1E96E0000003F000000000000000000000000000000000000004100000000susemanager-sls/test/data/formulas/metadata/grafana/metadata.yml  description:
  Enable and configure Grafana.
group: monitoring
 07070100000105000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000003E00000000susemanager-sls/test/data/formulas/metadata/image-synchronize 07070100000106000081B400000000000000000000000160C1E96E00000206000000000000000000000000000000000000004700000000susemanager-sls/test/data/formulas/metadata/image-synchronize/form.yml    image-synchronize:
    $type: hidden-group
    in_highstate:
        $name: Include Image Synchronization in Highstate
        $type: boolean
        $default: false

    whitelist:
        $type: edit-group
        $name: Synchronize only the listed images
        $minItems: 0
        $prototype:
            $type: text
            $help: Image name (without version)

    default_boot_image:
        $type: text
        $name: Default boot image
        $help: Default boot image used for first boot of a terminal
  07070100000107000081B400000000000000000000000160C1E96E00000051000000000000000000000000000000000000004B00000000susemanager-sls/test/data/formulas/metadata/image-synchronize/metadata.yml    description:
  Settings for image synchronization
group: SUSE_manager_for_retail
   07070100000108000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000003300000000susemanager-sls/test/data/formulas/metadata/locale    07070100000109000081B400000000000000000000000160C1E96E00001537000000000000000000000000000000000000003C00000000susemanager-sls/test/data/formulas/metadata/locale/form.yml   # This file is part of locale-formula.
#
# Foobar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Foobar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Foobar.  If not, see <http://www.gnu.org/licenses/>.

timezone:
  $type: group

  name:
    $type: select
    $values: ["CET",
              "CST6CDT",
              "EET",
              "EST",
              "EST5EDT",
              "GMT",
              "GMT+0",
              "GMT-0",
              "GMT0",
              "Greenwich",
              "HST",
              "MET",
              "MST",
              "MST7MDT",
              "NZ",
              "NZ-CHAT",
              "Navajo",
              "PST8PDT",
              "UCT",
              "UTC",
              "Universal",
              "W-SU",
              "WET",
              "Zulu",
              "Etc/GMT+1",
              "Etc/GMT+2",
              "Etc/GMT+3",
              "Etc/GMT+4",
              "Etc/GMT+5",
              "Etc/GMT+6",
              "Etc/GMT+7",
              "Etc/GMT+8",
              "Etc/GMT+9",
              "Etc/GMT+10",
              "Etc/GMT+11",
              "Etc/GMT+12",
              "Etc/GMT-1",
              "Etc/GMT-2",
              "Etc/GMT-3",
              "Etc/GMT-4",
              "Etc/GMT-5",
              "Etc/GMT-6",
              "Etc/GMT-7",
              "Etc/GMT-8",
              "Etc/GMT-9",
              "Etc/GMT-10",
              "Etc/GMT-11",
              "Etc/GMT-12",
              "Etc/GMT-13",
              "Etc/GMT-14",
              "Etc/GMT",
              "Etc/GMT+0",
              "Etc/GMT-0",
              "Etc/GMT0",
              "Etc/Greenwich",
              "Etc/UCT",
              "Etc/UTC",
              "Etc/Universal",
              "Etc/Zulu" 
              ]
    $default: CET

  hardware_clock_set_to_utc:
    $type: boolean
    $default: True

keyboard_and_language:
  $type: group

  language:
    $type: select
    $values: ["Afrikaans",
              "Arabic",
              "Asturian",
              "Bulgarian",
              "Bengali",
              "Bosnian",
              "Catalan",
              "Czech",
              "Welsh",
              "Danish",
              "German",
              "Greek",
              "English (UK)",
              "English (US)",
              "Spanish",
              "Estonian",
              "Finnish",
              "French",
              "Galician",
              "Gujarati",
              "Hebrew",
              "Hindi",
              "Croatian",
              "Hungarian",
              "Indonesian",
              "Italian",
              "Japanese",
              "Georgian",
              "Khmer",
              "Korean",
              "Lithuanian",
              "Macedonian",
              "Marathi",
              "Norwegian",
              "Dutch",
              "Nynorsk",
              "Punjabi",
              "Polish",
              "Portuguese (Brazilian)",
              "Portuguese",
              "Romanian",
              "Russian",
              "Sinhala",
              "Slovak",
              "Slovenian",
              "Serbian",
              "Swedish",
              "Tamil",
              "Tajik",
              "Thai",
              "Turkish",
              "Ukrainian",
              "Vietnamese",
              "Walloon",
              "Xhosa",
              "Simplified Chinese",
              "Traditional Chinese",
              "Zulu"
              ]
    $default: English (US)

  keyboard_layout:
    $type: select
    $values: ["Arabic",
              "Belgian",
              "Canadian (Multilingual)",
              "Croatian",
              "Czech",
              "Czech (qwerty)",
              "Danish",
              "Dutch",
              "Dvorak",
              "English (UK)",
              "English (US)",
              "Estonian",
              "Finnish",
              "French",
              "French (Canada)",
              "French (Switzerland)",
              "German",
              "German (Switzerland)",
              "German (with deadkeys)",
              "Greek",
              "Hungarian",
              "Icelandic",
              "Italian",
              "Japanese",
              "Khmer",
              "Korean",
              "Lithuanian",
              "Norwegian",
              "Polish",
              "Portuguese",
              "Portuguese (Brazil)",
              "Portuguese (Brazil  US accents)",
              "Romanian",
              "Russian",
              "Serbian",
              "Simplified Chinese",
              "Slovak",
              "Slovak (qwerty)",
              "Slovene",
              "Spanish",
              "Spanish (Asturian variant)",
              "Spanish (CP 850)",
              "Spanish (Latin America)",
              "Swedish",
              "Tajik",
              "Traditional Chinese",
              "Turkish",
              "Ukrainian",
              "US International" 
              ]
    $default: English (US)
 0707010000010A000081B400000000000000000000000160C1E96E00000071000000000000000000000000000000000000004000000000susemanager-sls/test/data/formulas/metadata/locale/metadata.yml   description:
  Settings for language, keyboard, and timezone
group: general_system_configuration
after:
  - users   0707010000010B000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000004100000000susemanager-sls/test/data/formulas/metadata/prometheus-exporters  0707010000010C000081B400000000000000000000000160C1E96E000003FF000000000000000000000000000000000000004A00000000susemanager-sls/test/data/formulas/metadata/prometheus-exporters/form.yml node_exporter:
  $type: group
  $help: Prometheus exporter for hardware and OS metrics.

  enabled:
    $type: boolean
    $default: True

  args:
    $name: "Arguments"
    $type: text
    $default: --web.listen-address=":9100"
    $help: Please refer to the documentation for available options.

apache_exporter:
  $type: group
  $help: Prometheus exporter for apache mod_status statistics.

  enabled:
    $type: boolean
    $default: False

  args:
    $name: "Arguments"
    $type: text
    $default: --telemetry.address=":9117"
    $help: Please refer to the documentation for available options.

postgres_exporter:
  $type: group
  $help: Prometheus exporter for PostgreSQL server metrics.

  enabled:
    $type: boolean
    $default: False

  data_source_name:
    $type: text
    $default: postgresql://user:passwd@localhost:5432/database?sslmode=disable

  args:
    $name: "Arguments"
    $type: text
    $default: --web.listen-address=":9187"
    $help: Please refer to the documentation for available options.
 0707010000010D000081B400000000000000000000000160C1E96E00000061000000000000000000000000000000000000004E00000000susemanager-sls/test/data/formulas/metadata/prometheus-exporters/metadata.yml description:
  Enable and configure Prometheus exporters for managed systems.
group: monitoring

   0707010000010E000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000003700000000susemanager-sls/test/data/formulas/metadata/prometheus    0707010000010F000081B400000000000000000000000160C1E96E0000093C000000000000000000000000000000000000004000000000susemanager-sls/test/data/formulas/metadata/prometheus/form.yml   prometheus:
  $type: namespace

  enabled:
    $type: boolean
    $default: True

  scrape_interval:
    $type: number
    $name: Scrape interval (s)
    $default: 15
    $disabled: "!prometheus.enabled"
    $required: true

  evaluation_interval:
    $type: number
    $name: Evaluation interval (s)
    $default: 15
    $disabled: "!prometheus.enabled"
    $required: true

  mgr:
    $type: group
    $name: Uyuni Server
    $disabled: "!prometheus.enabled"

    monitor_server:
      $name: Monitor this server
      $type: boolean
      $default: True

    autodiscover_clients:
      $name: Autodiscover clients 
      $type: boolean
      $default: True

    sd_username:
      $type: text
      $name: Username
      $help: Username for auto-discovering clients
      $default: admin
      $visibleIf: .autodiscover_clients == true
      $required: true

    sd_password:
      $type: password
      $name: Password
      $help: Password for auto-discovering clients
      $visibleIf: .autodiscover_clients == true
      $required: true

  alerting:
    $type: group
    $disabled: "!prometheus.enabled"

    alertmanager_service:
      $type: boolean
      $default: True
      $name: Enable local Alertmanager service

    use_local_alertmanager:
      $type: boolean
      $name: Use local Alertmanager
      $help: Use local Alertmanager for this Prometheus instance
      $visibleIf: .alertmanager_service == true
      $default: True

    alertmanagers:
      $type: edit-group
      $minItems: 0
      $itemName: Target ${i}
      $prototype:
        $type: group 
        $key:
          $type: text 
          $name: "IP Address : Port"
          $default: localhost:9093
          $match: ".*\\:\\d{1,5}"

    rule_files:
      $type: edit-group
      $minItems: 0
      $prototype:
        $type: text
        $default: /etc/prometheus/my-rules.yml
        $required: true

  scrape_configs:
    $type: edit-group
    $name: User defined scrape configurations
    $minItems: 0
    $itemName: File-based service discovery ${i}
    $disabled: "!prometheus.enabled"
    $prototype:
      $type: group 
      $key:
        $type: text 
        $name: "Job name"
      files:
        $type: edit-group
        $minItems: 1
        $prototype:
          $type: text
          $default: /etc/prometheus/my-scrape-config.yml
          $required: true


07070100000110000081B400000000000000000000000160C1E96E00000042000000000000000000000000000000000000004400000000susemanager-sls/test/data/formulas/metadata/prometheus/metadata.yml   description:
  Enable and configure Prometheus
group: monitoring

  07070100000111000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000003000000000susemanager-sls/test/data/formulas/metadata/pxe   07070100000112000081B400000000000000000000000160C1E96E000002B4000000000000000000000000000000000000003900000000susemanager-sls/test/data/formulas/metadata/pxe/form.yml  pxe:
  $type: hidden-group

  kernel_name:
     $name: 'Kernel Filename'
     $type: text
     $default: 'linux'

  initrd_name:
     $name: 'Initrd Filename'
     $type: text
     $default: 'initrd.gz'

  default_kernel_parameters:
     $name: 'Kernel Command Line Parameters'
     $type: text
     $default: 'panic=60 ramdisk_size=710000 ramdisk_blocksize=4096 vga=0x317 splash=silent kiwidebug=0'

  pxe_root_directory:
     $name:  'PXE Root Directory'
     $type: text
     $default: '/srv/saltboot'

  branch_id:
     $name: 'Branch Id'
     $type: text
     $placeholder: 'Enter unique Branch server ID (e.g. "B0001")'
     $help: 'Branch server ID is used as a prefix in terminal ID'
07070100000113000081B400000000000000000000000160C1E96E00000067000000000000000000000000000000000000003D00000000susemanager-sls/test/data/formulas/metadata/pxe/metadata.yml  description:
  PXE settings for branch server
group: SUSE_manager_for_retail
after:
  - branch-network
 07070100000114000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000003500000000susemanager-sls/test/data/formulas/metadata/saltboot  07070100000115000081B400000000000000000000000160C1E96E0000157C000000000000000000000000000000000000003E00000000susemanager-sls/test/data/formulas/metadata/saltboot/form.yml partitioning:
    $name: Disk Partitioning
    $type: edit-group
    $itemName: Disk ${i}
    $minItems: 1
    $prototype:
        $type: group
        $key:
            $type: text
            $name: Disk Symbolic ID
            $placeholder: Enter disk symbolic ID (e.g. disk1, disk2, md0 for RAID devices)
            $help: Disk Symbolic ID is used together with Partition Symbolic ID for RAID completion.
        type:
            $type: select
            $name: Device Type
            $values:
              - RAID
              - DISK
            $default: DISK
        device:
            $type: text
            $visibleIf: .type == DISK
            $name: Disk Device
            $placeholder: Enter target disk device name (e.g. /dev/sda)
            $optional: true
        level:
            $visibleIf: .type == RAID
            $type: select
            $name: RAID Level
            $values:
              -
              - 0
              - 1
              - 4
              - 5
              - 6
              - 10
              - linear
              - multipath
            $default:
            $optional: true
        devices:
            $visibleIf: .type == RAID
            $type: edit-group
            $name: Symbolic IDs of devices to used for RAID device type
            $minItems: 0
            $prototype:
                $type: text
                $help: E.g. disk1p1, disk2p1, ... Combination of Disk symbolic ID and Partition symbolic ID to describe devices/partitions used to build RAID device.
                $placeholder: Enter combination of Disk and Partition symbolic ID (e.g. disk1part1, disk2part1, ...)
            $optional: True
        disklabel:
            $type: select
            $name: Partition table type
            $values:
              - gpt
              - msdos
              - none
        partitions:
            $type: edit-group
            $itemName: Partition ${i}
            $minItems: 1
            $optional: True
            $visibleIf: .disklabel != "none"
            $prototype:
                $type: group
                $key:
                    $type: text
                    $name: Partition Symbolic ID
                    $help: E.g. p1, p2, ... Together with Disk symbolic ID is used for RAID completion.
                    $placeholder: Enter partition symbolic ID (e.g. part1, part2, ...)
                size_MiB:
                    $type: number
                    $name: Partition Size (MiB)
                    $help: Leave blank to acquire remaining empty space on the disk.
                    $optional: True
                mountpoint:
                    $type: text
                    $name: Device Mount Point
                    $help: What should the partition be mount as - /, swap, /var, ...
                    $optional: True
                format:
                    $type: select
                    $name: Filesystem Format
                    $values:
                      -
                      - btrfs
                      - ext4
                      - xfs
                      - vfat
                      - swap
                    $optional: True
                image:
                    $type: text
                    $name: OS Image to Deploy
                    $help: Name of the OS Image. Leave blank if no image should be deployed on this partition.
                    $optional: True
                image_version:
                    $visibleIf: .image != ''
                    $type: text
                    $help: Version of OS Image. Leave blank for most recent.
                    $optional: True
                luks_pass:
                    $optional: True
                    $type: text
                    $name: Partition Encryption Password
                    $help: Password for encrypted partition. Leave blank for unencrypted. Image itself still can be encrypted.
                flags:
                    $type: select
                    $name: Partition Flags
                    $values:
                      -
                      - swap
                      - raid
                      - bios_grub
                      - esp
                      - boot
                    $default:
        mountpoint:
            $type: text
            $name: Device Mount Point
            $help: What should the partition be mount as - /, swap, /var, ...
            $optional: True
            $visibleIf: .disklabel == "none"
        format:
            $type: select
            $name: Filesystem Format
            $visibleIf: .disklabel == "none"
            $values:
              -
              - btrfs
              - ext4
              - xfs
              - vfat
              - swap
            $optional: True
        image:
            $visibleIf: .disklabel == "none"
            $type: text
            $name: OS Image to Deploy
            $help: Name of the OS Image. Leave blank if no image should be deployed on this partition.
            $optional: True
        image_version:
            $visibleIf: .image != ''
            $type: text
            $help: Version of OS Image. Leave blank for most recent.
            $optional: True
        luks_pass:
            $visibleIf: .disklabel == "none"
            $optional: True
            $type: text
            $name: Partition Encryption Password
            $help: Password for encrypted partition. Leave blank for unencrypted. Image itself still can be encrypted.
07070100000116000081B400000000000000000000000160C1E96E0000005B000000000000000000000000000000000000004200000000susemanager-sls/test/data/formulas/metadata/saltboot/metadata.yml description:
  Control deployment and boot of POS terminals
group: SUSE_manager_for_retail
 07070100000117000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000003200000000susemanager-sls/test/data/formulas/metadata/tftpd 07070100000118000081B400000000000000000000000160C1E96E00000137000000000000000000000000000000000000003B00000000susemanager-sls/test/data/formulas/metadata/tftpd/form.yml    tftpd:
  $type: hidden-group

  listen_ip:
     $name: 'Internal Network Address'
     $type: text
     $optional: True

  root_dir:
     $name: 'TFTP base directory'
     $type: text
     $default: '/srv/tftpboot'

  tftpd_user:
     $name: 'run TFTP under user'
     $type: text
     $default: 'tftp'

      
 07070100000119000081B400000000000000000000000160C1E96E00000068000000000000000000000000000000000000003F00000000susemanager-sls/test/data/formulas/metadata/tftpd/metadata.yml    description:
  Settings for tftpd service
group: general_system_configuration
after:
  - branch-network
0707010000011A000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000004000000000susemanager-sls/test/data/formulas/metadata/virtualization-host   0707010000011B000081B400000000000000000000000160C1E96E0000005F000000000000000000000000000000000000004900000000susemanager-sls/test/data/formulas/metadata/virtualization-host/form.yml  hypervisor:
  $type: select
  $values: ["KVM",
            "Xen"
            ]
  $default: KVM
 0707010000011C000081B400000000000000000000000160C1E96E00000055000000000000000000000000000000000000004D00000000susemanager-sls/test/data/formulas/metadata/virtualization-host/metadata.yml  description:
  Settings for virtualization host.
group: general_system_configuration
   0707010000011D000041FD00000000000000000000000160C1E96E00000000000000000000000000000000000000000000003300000000susemanager-sls/test/data/formulas/metadata/vsftpd    0707010000011E000081B400000000000000000000000160C1E96E00000604000000000000000000000000000000000000003C00000000susemanager-sls/test/data/formulas/metadata/vsftpd/form.yml   vsftpd_config:
  $type: hidden-group

  anon_root:
     $name: 'FTP server directory'
     $type: text
     $default: '/srv/ftp'

     
  listen_address:
     $name: 'Internal Network Address'
     $type: text
     $optional: True

  ssl_enable:
     $name:  'Enable ssl'
     $type: boolean
     $default: false
     
  secure_chroot_dir: 
     $name:  'Chroot dir'
     $type: text
     $default: '/usr/share/empty'

  anonymous_enable:
     $name:  'Allow anonymous FTP'
     $type: boolean
     $default: true

  allow_anon_ssl:
     $name:  'Allow SSL for anonymous'
     $type: boolean
     $default: true

  listen:
     $name:  'Run standalone'
     $type: boolean
     $default: true

  local_enable:
     $name:  'Allow local users'
     $type: boolean
     $default: true

  dirmessage_enable:
     $name:  'Activate directory messages'
     $type: boolean
     $default: true

  use_localtime: 
     $name:  'Use localtime'
     $type: boolean
     $default: true

  xferlog_enable: 
     $name:  'Activate logging of transfers'
     $type: boolean
     $default: true

  connect_from_port_20: 
     $name:  'Connect from port 20'
     $type: boolean
     $default: true

  pam_service_name: 
     $name:  'PAM service name'
     $type: text
     $default: 'vsftpd'

  rsa_cert_file:
     $name:  'RSA certificate file'
     $type: text
     $default: '/etc/ssl/certs/[ssl-cert-file].pem'

  rsa_private_key_file:
     $name:  'RSA private key file'
     $type: text
     $default: '/etc/ssl/private/[ssl-cert-file].key'

      0707010000011F000081B400000000000000000000000160C1E96E00000071000000000000000000000000000000000000004000000000susemanager-sls/test/data/formulas/metadata/vsftpd/metadata.yml   description:
  Settings for vsftpd for branchserver
group: general_system_configuration
after:
  - branch-network   07070100000120000081B400000000000000000000000160C1E96E00000018000000000000000000000000000000000000002E00000000susemanager-sls/test/data/group_formulas.json {"9":["locale","tftpd"]}07070100000121000081B400000000000000000000000160C1E96E00000077000000000000000000000000000000000000002F00000000susemanager-sls/test/data/minion_formulas.json    {"suma-refhead-min-centos7.mgr.suse.de":["branch-network"],"suma-refhead-min-sles12sp4.mgr.suse.de":["branch-network"]} 07070100000122000081B400000000000000000000000160C1E96E00001F64000000000000000000000000000000000000002400000000susemanager-sls/test/test_engine.py   import logging
import pytest
import psycopg2
import shlex
import subprocess
from mgr_events import Responder, DEFAULT_COMMIT_BURST
from mock import MagicMock, patch, call
from sqlalchemy import create_engine
from sqlalchemy_utils import database_exists, create_database, drop_database


ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
log = logging.getLogger('mgr_events')
log.setLevel(logging.DEBUG)
log.addHandler(ch)


@pytest.fixture(scope="session")
def postgres(request):
    """Session-scoped fixture: start a PostgreSQL server for the whole run.

    Launches the server as the ``postgres`` system user via ``pg_ctl`` and
    registers a finalizer that stops it again when the session ends.
    Yields the ``Popen`` handle of the start command (not the server itself).
    """
    # NOTE(review): start uses ``~/data`` while the stop finalizer uses
    # /var/lib/pgsql/data — presumably the same directory inside the test
    # container (postgres' home); confirm against the Docker image.
    proc = subprocess.Popen(shlex.split("su postgres -c \"pg_ctl -D ~/data -l ~/logfile start\""))
    def finalizer():
        # Best effort shutdown; the stop command itself is not waited on.
        subprocess.Popen(shlex.split("su postgres -c \"pg_ctl stop -D /var/lib/pgsql/data\""))
    request.addfinalizer(finalizer)
    # Wait (max 15s) for pg_ctl to return, i.e. for the server to be started.
    outs, errs = proc.communicate(timeout=15)
    yield proc


@pytest.fixture(scope="session")
def db_engine(postgres):
    """Session-wide SQLAlchemy engine for the local 'test' database."""
    engine = create_engine("postgresql://postgres@/test")
    return engine


@pytest.fixture
def db_connection(db_engine):
    """Yield a psycopg2 connection to a freshly ensured 'test' database.

    The database is created on demand before connecting and dropped again
    once the test is done.
    """
    if not database_exists(db_engine.url):
        create_database(db_engine.url)
    conn = psycopg2.connect(user='postgres', host="localhost", dbname="test")
    # psycopg2's context manager commits/rolls back on exit (it does not close).
    with conn as connection:
        yield connection
    drop_database(db_engine.url)


def new_connection():
    """Open a fresh, throwaway psycopg2 connection to the 'test' database."""
    return psycopg2.connect(
        user='postgres', host="localhost", dbname="test"
    )


@pytest.fixture
def create_tables(db_connection):
    """Create the suseSaltEvent table the Responder under test writes to."""
    sql = """CREATE TABLE suseSaltEvent (
        id SERIAL PRIMARY KEY,
        minion_id CHARACTER VARYING(256),
        data TEXT NOT NULL,
        queue NUMERIC NOT NULL
    );"""
    cursor = db_connection.cursor()
    cursor.execute(sql)
    db_connection.commit()


def delete_table(conn, table):
    """Remove every row from *table* and commit the deletion.

    :param conn: open DB-API connection (must provide ``cursor()`` and
        ``commit()``).
    :param table: table name.  Identifiers cannot be bound as SQL
        parameters, so the name is interpolated into the statement text;
        it is validated first so arbitrary SQL cannot be injected.
    :raises ValueError: if *table* is not a plain identifier.
    """
    if not table.isidentifier():
        raise ValueError("invalid table name: %r" % table)
    conn.cursor().execute("DELETE FROM %s" % table)
    conn.commit()


@pytest.fixture
def responder(db_connection, create_tables):
    """Build a Responder wired to the test database.

    ``mgr_events.psycopg2`` is patched so the Responder reuses the
    fixture's connection instead of opening its own.
    """
    config = {
        'postgres_db': {
             'dbname': 'tests',
             'user': 'postgres',
             'password': '',
             'host': 'localhost',
             'notify_channel': 'suseSaltEvent'
         },
        'events': {
            'thread_pool_size': 3
        }
    }
    with patch('mgr_events.psycopg2') as mock_psycopg2:
        mock_psycopg2.connect.return_value = db_connection
        # The first argument is a mocked event bus.
        return Responder(MagicMock(), config)


def test_connection_recovery_on_insert(db_connection, responder):
    """An insert on a closed connection must reconnect and still be stored."""
    # First event goes through a throwaway connection, which is then closed.
    responder.connection = new_connection()
    responder._insert('salt/minion/1/start', {'value': 1})
    responder.connection.close()
    # The second insert should transparently reconnect (to the patched one).
    with patch('mgr_events.psycopg2') as fake_psycopg2:
        fake_psycopg2.connect.return_value = db_connection
        responder._insert('salt/minion/2/start', {'value': 2})
    responder.connection.commit()
    responder.cursor.execute("SELECT * FROM suseSaltEvent")
    rows = responder.cursor.fetchall()
    assert len(rows) == 2


def test_connection_recovery_on_commit(db_connection, responder):
    """A commit on a closed connection must reconnect without losing events."""
    responder.connection = new_connection()
    responder._insert('salt/minion/1/start', {'value': 1})
    responder.connection.close()
    with patch('mgr_events.psycopg2') as fake_psycopg2:
        fake_psycopg2.connect.return_value = db_connection
        responder.attempt_commit()
    responder.connection.commit()
    responder.cursor.execute("SELECT * FROM suseSaltEvent")
    rows = responder.cursor.fetchall()
    assert len(rows) == 1


def test_insert_start_event(responder, db_connection):
    """A minion start event is queued and consumes one commit token."""
    event = ('salt/minion/12345/start', {'value': 1})
    responder.event_bus.unpack.return_value = event
    responder.add_event_to_queue('')
    responder.cursor.execute("SELECT * FROM suseSaltEvent;")
    rows = responder.cursor.fetchall()
    assert rows
    assert responder.tokens == DEFAULT_COMMIT_BURST - 1


def test_insert_job_return_event(responder):
    """A job return event is queued and consumes one commit token."""
    event = ('salt/job/12345/ret/6789', {'value': 1})
    responder.event_bus.unpack.return_value = event
    responder.add_event_to_queue('')
    responder.cursor.execute("SELECT * FROM suseSaltEvent;")
    rows = responder.cursor.fetchall()
    assert rows
    assert responder.tokens == DEFAULT_COMMIT_BURST - 1

def test_insert_batch_start_event(responder):
    """A batch start event is queued and consumes one commit token."""
    event = ('salt/batch/12345/start', {'value': 1})
    responder.event_bus.unpack.return_value = event
    responder.add_event_to_queue('')
    responder.cursor.execute("SELECT * FROM suseSaltEvent;")
    rows = responder.cursor.fetchall()
    assert rows
    assert responder.tokens == DEFAULT_COMMIT_BURST - 1

def test_discard_batch_presence_ping_event(responder):
    """A test.ping return flagged with batch-mode metadata is not stored."""
    ping = {'value': 1, 'fun': 'test.ping', 'metadata': {'batch-mode': True}}
    responder.event_bus.unpack.return_value = ('salt/job/12345/ret/6789', ping)
    responder.add_event_to_queue('')
    responder.cursor.execute("SELECT * FROM suseSaltEvent;")
    rows = responder.cursor.fetchall()
    assert len(rows) == 0


def test_keep_presence_ping_event_without_batch(responder):
    """A plain test.ping return (no batch-mode metadata) is kept."""
    ping = {'value': 1, 'fun': 'test.ping', 'id': 'testminion'}
    responder.event_bus.unpack.return_value = ('salt/job/12345/ret/6789', ping)
    responder.add_event_to_queue('')
    responder.cursor.execute("SELECT * FROM suseSaltEvent;")
    rows = responder.cursor.fetchall()
    assert len(rows) == 1


def test_commit_scheduled_on_init(responder):
    """Constructing the Responder schedules exactly one delayed commit."""
    scheduled = responder.event_bus.io_loop.call_later.call_count
    assert scheduled == 1


def test_commit_empty_queue(responder):
    """attempt_commit on an empty queue neither commits nor burns tokens."""
    responder.counters = [0, 0, 0, 0]
    with patch.object(responder, 'event_bus', MagicMock()):
        with patch.object(responder, 'connection') as fake_connection:
            fake_connection.closed = False
            responder.attempt_commit()
            # Nothing queued -> no commit issued on the (mocked) connection.
            assert fake_connection.commit.call_count == 0
        assert responder.tokens == DEFAULT_COMMIT_BURST


def test_postgres_notification(responder):
    """_insert must NOTIFY the channel with the per-queue counters."""
    with patch.object(responder, 'cursor'):
        responder._insert('salt/minion/1/start', {'value': 1, 'id': 'testminion'})
        assert responder.counters == [0, 0, 0, 0]
        assert responder.tokens == DEFAULT_COMMIT_BURST - 1
        last_call = responder.cursor.execute.mock_calls[-1:]
        assert last_call == [call("NOTIFY suseSaltEvent, '0,0,1,0';")]

def test_add_token(responder):
    """add_token increments an empty bucket by one."""
    responder.tokens = 0
    responder.add_token()
    assert responder.tokens == 1

def test_add_token_max(responder):
    """add_token never pushes the bucket above DEFAULT_COMMIT_BURST."""
    responder.add_token()
    assert responder.tokens == DEFAULT_COMMIT_BURST

def test_commit_avoidance_without_tokens(responder):
    """With an empty token bucket the event is inserted but not committed."""
    with patch.object(responder, 'cursor'):
        with patch.object(responder, 'connection') as fake_connection:
            fake_connection.closed = False
            fake_connection.encoding = 'utf-8'
            responder.tokens = 0
            responder._insert('salt/minion/1/start', {'id': 'testminion', 'value': 1})
            assert responder.counters == [0, 0, 1, 0]
            assert responder.tokens == 0
            assert responder.connection.commit.call_count == 0
            expected = call('INSERT INTO suseSaltEvent (minion_id, data, queue) VALUES (%s, %s, %s);', ('testminion', '{"tag": "salt/minion/1/start", "data": {"id": "testminion", "value": 1}}', 2))
            assert responder.cursor.execute.mock_calls == [expected]


def test_postgres_connect(db_connection, responder):
    """db_keepalive retries after OperationalError, sleeping 5s in between."""
    stale = new_connection()
    stale.close()
    responder.connection = stale
    with patch('mgr_events.time') as mock_time:
        with patch('mgr_events.psycopg2') as mock_psycopg2:
            # First connect attempt fails, the second succeeds.
            mock_psycopg2.OperationalError = psycopg2.OperationalError
            mock_psycopg2.connect.side_effect = [psycopg2.OperationalError, db_connection]
            responder.db_keepalive()
            assert mock_psycopg2.connect.call_count == 2
    mock_time.sleep.assert_called_once_with(5)


def test_postgres_connect_with_port(responder):
    """A configured port must show up in the psycopg2 connect string."""
    responder.config['postgres_db']['port'] = '1234'
    with patch('mgr_events.psycopg2') as fake_psycopg2:
        responder._connect_to_database()
        fake_psycopg2.connect.assert_called_once_with(
            u"dbname='tests' user='postgres' host='localhost' port='1234' password=''"
        )
07070100000123000081B400000000000000000000000160C1E96E0000039B000000000000000000000000000000000000003000000000susemanager-sls/test/test_pillar_suma_minion.py   # -*- coding: utf-8 -*-
'''
:codeauthor:    Michael Calmer <Michael.Calmer@suse.com>
'''

from mock import MagicMock, patch

import sys
sys.path.append("../modules/pillar")
import os
import copy

import suma_minion


def test_virtual():
    '''
    Test that __virtual__ enables the module by returning True
    '''
    # `== True` would also accept truthy non-booleans such as 1; the module
    # is expected to return the boolean True itself, so compare identity.
    assert suma_minion.__virtual__() is True

def test_formula_pillars():
    '''
    Test that formulas appear in the pillar in the configured order
    '''
    # All test fixtures live under ./data relative to the working directory.
    data_dir = os.path.sep.join([os.path.abspath(''), 'data'])
    suma_minion.FORMULAS_DATA_PATH = data_dir
    suma_minion.FORMULA_ORDER_FILE = os.path.sep.join([data_dir, 'formula_order.json'])
    suma_minion.MANAGER_FORMULAS_METADATA_MANAGER_PATH = os.path.sep.join([data_dir, 'formulas', 'metadata'])
    pillar = suma_minion.formula_pillars("suma-refhead-min-sles12sp4.mgr.suse.de", [9])
    assert "formulas" in pillar
    assert pillar["formulas"] == ['branch-network', 'locale', 'tftpd']

 07070100000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000B00000000TRAILER!!!                                                                                                                                                                                                                                                                                                                        