07070100000000000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001000000000susemanager-sls   07070100000001000081B40000000000000000000000015EA152C400000023000000000000000000000000000000000000001B00000000susemanager-sls/.gitignore    *.cache*
*__pycache__*
*.pyc
*.pyo
 07070100000002000081B40000000000000000000000015EA152C400000347000000000000000000000000000000000000002000000000susemanager-sls/Makefile.python   THIS_MAKEFILE := $(realpath $(lastword $(MAKEFILE_LIST)))
CURRENT_DIR := $(dir $(THIS_MAKEFILE))
include $(CURRENT_DIR)../../rel-eng/Makefile.python

# Docker tests variables
DOCKER_CONTAINER_BASE = suma-4.0
DOCKER_REGISTRY       = registry.mgr.suse.de
# Fix: `$PYTHONPATH` is parsed by make as `$(P)YTHONPATH` and expanded to
# "YTHONPATH"; `$$` passes a literal `$` through to the shell so the
# container shell expands PYTHONPATH at run time.
DOCKER_RUN_EXPORT     = "PYTHONPATH=$$PYTHONPATH"
DOCKER_VOLUMES        = -v "$(CURDIR)/../../:/manager"

# Run pylint inside the current environment; `|| true` is intentional so the
# report is generated even when lint findings exist.
__pylint ::
	$(call update_pip_env)
	pylint --rcfile=pylintrc $(shell find -name '*.py') > reports/pylint.log || true

# Run the __pylint target inside the SUSE Manager test container.
docker_pylint ::
	docker run --rm -e $(DOCKER_RUN_EXPORT) $(DOCKER_VOLUMES) $(DOCKER_REGISTRY)/$(DOCKER_CONTAINER_BASE)-pgsql /bin/sh -c "cd /manager/susemanager-utils/susemanager-sls/; make -f Makefile.python __pylint"

# Interactive shell inside the test container for debugging.
docker_shell ::
	docker run -t -i --rm -e $(DOCKER_RUN_EXPORT) $(DOCKER_VOLUMES) $(DOCKER_REGISTRY)/$(DOCKER_CONTAINER_BASE)-pgsql /bin/bash
 07070100000003000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000002100000000susemanager-sls/formula_metadata  07070100000004000081B40000000000000000000000015EA152C4000001A1000000000000000000000000000000000000002B00000000susemanager-sls/formula_metadata/README.md    All metadata for your custom Salt Formulas should be put here. (/srv/formula_metadata/<your-formula-name>/)
The state files need to be on a salt file root and belong to /srv/salt.

To learn more about Salt Formulas and how to write them visit: https://docs.saltstack.com/en/latest/topics/development/conventions/formulas.html
To use your formulas effectively with SUSE Manager they additionally need a form.yml file.
   07070100000005000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001900000000susemanager-sls/formulas  07070100000006000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000002200000000susemanager-sls/formulas/metadata 07070100000007000081B40000000000000000000000015EA152C4000001A4000000000000000000000000000000000000002C00000000susemanager-sls/formulas/metadata/README.md   The metadata of Salt Formulas that get installed per RPM belongs in this directory.

For more information visit:
https://github.com/SUSE/spacewalk/wiki/Using-Salt-formulas-with-SUSE-Manager
https://github.com/SUSE/spacewalk/wiki/Writing-Salt-Formulas-for-SUSE-Manager
https://github.com/SUSE/spacewalk/wiki/Salt-Formula-RPMs-for-SUSE-Manager
https://github.com/SUSE/spacewalk/wiki/How-Salt-formulas-in-SUSE-Manager-work
07070100000008000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000002000000000susemanager-sls/formulas/states   07070100000009000081B40000000000000000000000015EA152C400000022000000000000000000000000000000000000002D00000000susemanager-sls/formulas/states/formulas.sls  include: {{ pillar["formulas"] }}
  0707010000000A000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001800000000susemanager-sls/modules   0707010000000B000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000002000000000susemanager-sls/modules/engines   0707010000000C000081B40000000000000000000000015EA152C400001DEE000000000000000000000000000000000000002E00000000susemanager-sls/modules/engines/mgr_events.py # -*- coding: utf-8 -*-
'''
mgr_events.py is a SaltStack engine that writes selected events to SUSE
Manager's PostgreSQL database. Additionally, it sends notifications via the
LISTEN/NOTIFY mechanism to alert SUSE Manager of newly available events.

mgr_events.py tries to keep the I/O low in high load scenarios. Therefore
events are INSERTed once they come in, but not necessarily COMMITted
immediately.

The algorithm is an implementation of token bucket:
 - a COMMIT costs one token
 - initially, commit_burst tokens are available
 - every commit_interval seconds, one new token is generated
   (up to commit_burst)
 - when an event arrives and there are tokens available it is COMMITted
   immediately
 - when an event arrives but no tokens are available, the event is INSERTed but
   not COMMITted yet. COMMIT will happen as soon as a token is available

.. versionadded:: 2018.3.0

:depends: psycopg2

Minimal configuration example

.. code:: yaml

    engines:
      - mgr_events:
          postgres_db:
              dbname: susemanager
              user: spacewalk
              password: spacewalk
              host: localhost
              notify_channel: suseSaltEvent

Full configuration example

.. code:: yaml

    engines:
      - mgr_events:
          commit_interval: 1
          commit_burst: 100
          postgres_db:
              dbname: susemanager
              user: spacewalk
              password: spacewalk
              host: localhost
              port: 5432
              notify_channel: suseSaltEvent

Most of the values have a sane default. But we still need the login and host
for the PostgreSQL database. Only the `notify_channel` there is optional. The
default for host is 'localhost'.
'''

# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
import fnmatch
import hashlib

try:
    import psycopg2
    HAS_PSYCOPG2 = True
except ImportError:
    HAS_PSYCOPG2 = False

# Import salt libs
import salt.utils.event
import json

# Import third-party libs
import tornado
from salt.utils.zeromq import ZMQDefaultLoop

log = logging.getLogger(__name__)

DEFAULT_COMMIT_INTERVAL = 1
DEFAULT_COMMIT_BURST = 100

def __virtual__():
    # The engine is only loadable when psycopg2 imported successfully.
    return HAS_PSYCOPG2


class Responder:
    '''
    Consume events from the Salt event bus, INSERT the relevant ones into
    the suseSaltEvent table and COMMIT/NOTIFY following the token-bucket
    rate limit described in the module docstring.
    '''
    def __init__(self, event_bus, config):
        # Fill in documented defaults for anything the engine config omits.
        self.config = config
        self.config.setdefault('commit_interval', DEFAULT_COMMIT_INTERVAL)
        self.config.setdefault('commit_burst', DEFAULT_COMMIT_BURST)
        self.config.setdefault('postgres_db', {})
        self.config['postgres_db'].setdefault('host', 'localhost')
        self.config['postgres_db'].setdefault('notify_channel', 'suseSaltEvent')
        # One uncommitted-event counter per queue: queue 0 plus one queue per
        # thread in SUSE Manager's configured thread pool.
        self.counters = [0 for i in range(config['events']['thread_pool_size'] + 1)]
        # The token bucket starts full (commit_burst tokens).
        self.tokens = config['commit_burst']
        self.event_bus = event_bus
        self._connect_to_database()
        # Schedule the periodic token refill on the event loop.
        self.event_bus.io_loop.call_later(config['commit_interval'], self.add_token)

    def _connect_to_database(self):
        '''
        (Re)connect to PostgreSQL, retrying every 5 seconds until it
        succeeds. Blocks the engine while no connection is available.
        '''
        db_config = self.config.get('postgres_db')
        # 'port' is optional; when absent psycopg2 uses its default port.
        if 'port' in db_config:
            conn_string = "dbname='{dbname}' user='{user}' host='{host}' port='{port}' password='{password}'".format(**db_config)
        else:
            conn_string = "dbname='{dbname}' user='{user}' host='{host}' password='{password}'".format(**db_config)
        log.debug("%s: connecting to database", __name__)
        while True:
            try:
                self.connection = psycopg2.connect(conn_string)
                break
            except psycopg2.OperationalError as err:
                log.error("%s: %s", __name__, err)
                log.error("%s: Retrying in 5 seconds.", __name__)
                time.sleep(5)
        self.cursor = self.connection.cursor()

    def _insert(self, tag, data):
        '''
        INSERT an interesting event into suseSaltEvent and attempt a commit.
        Events not matching the tag whitelist below — plus salt-mine updates
        and batch-mode presence pings — are discarded.
        '''
        self.db_keepalive()
        if any([
            fnmatch.fnmatch(tag, "salt/minion/*/start"),
            fnmatch.fnmatch(tag, "salt/job/*/ret/*"),
            fnmatch.fnmatch(tag, "salt/beacon/*"),
            fnmatch.fnmatch(tag, "salt/engines/*"),
            fnmatch.fnmatch(tag, "salt/batch/*/start"),
            fnmatch.fnmatch(tag, "suse/manager/image_deployed"),
            fnmatch.fnmatch(tag, "suse/systemid/generate")
        ]) and not self._is_salt_mine_event(tag, data) and not self._is_presence_ping(tag, data):
            queue = 0
            if 'id' in data:
                # A stable hash of the minion id spreads minions evenly over
                # queues 1..thread_pool_size; queue 0 is reserved for events
                # without a minion id.
                hash_sum = hashlib.md5(data.get("id").encode(self.connection.encoding)).hexdigest()[0:8]
                queue = int(hash_sum, 16) % self.config['events']['thread_pool_size'] + 1
            log.debug("%s: Adding event to queue %d -> %s", __name__, queue, tag)
            try:
                self.cursor.execute(
                    'INSERT INTO suseSaltEvent (minion_id, data, queue) VALUES (%s, %s, %s);',
                    (data.get("id"), json.dumps({'tag': tag, 'data': data}), queue)
                )
                self.counters[queue] += 1
                self.attempt_commit()
            except Exception as err:
                log.error("%s: %s", __name__, err)
            finally:
                log.debug("%s: %s", __name__, self.cursor.query)
        else:
            log.debug("%s: Discarding event -> %s", __name__, tag)

    def trace_log(self):
        '''
        Emit the per-queue counters and remaining tokens at trace level.
        '''
        log.trace("%s: queues sizes -> %s", __name__, self.counters)
        log.trace("%s: tokens -> %s", __name__, self.tokens)

    def _is_salt_mine_event(self, tag, data):
        # Job returns of mine.update are noise for SUSE Manager.
        return fnmatch.fnmatch(tag, "salt/job/*/ret/*") and self._is_salt_mine_update(data)

    def _is_salt_mine_update(self, data):
        return data.get("fun") == "mine.update"

    def _is_presence_ping(self, tag, data):
        # Batch-mode test.ping returns are presence checks, not real jobs.
        return fnmatch.fnmatch(tag, "salt/job/*/ret/*") and self._is_test_ping(data) and self._is_batch_mode(data)

    def _is_test_ping(self, data):
        return data.get("fun") == "test.ping"

    def _is_batch_mode(self, data):
        return data.get("metadata", {}).get("batch-mode")

    @tornado.gen.coroutine
    def add_event_to_queue(self, raw):
        '''
        Event-bus callback: unpack a raw event and store it.
        '''
        tag, data = self.event_bus.unpack(raw, self.event_bus.serial)
        self._insert(tag, data)

    def db_keepalive(self):
        '''
        Reconnect (blocking, with retries) when the connection was closed.
        '''
        if self.connection.closed:
            # NOTE(review): typo in the log message ("Diconnected").
            log.error("%s: Diconnected from database. Trying to reconnect...", __name__)
            self._connect_to_database()

    @tornado.gen.coroutine
    def add_token(self):
        '''
        Periodic token-bucket refill: add one token (capped at commit_burst),
        try to commit pending events, then reschedule itself.
        '''
        self.tokens = min(self.tokens + 1, self.config['commit_burst'])
        self.attempt_commit()
        self.trace_log()
        self.event_bus.io_loop.call_later(self.config['commit_interval'], self.add_token)

    def attempt_commit(self):
        """
        Committing to the database.

        COMMITs pending INSERTs and NOTIFYs the configured channel with the
        per-queue event counts — but only when a token is available and there
        is something to commit. Consumes one token on success.
        """
        self.db_keepalive()
        if self.tokens > 0 and sum(self.counters) > 0:
            log.debug("%s: commit", __name__)
            # NOTE(review): NOTIFY is built via string formatting because the
            # driver cannot parameterize it; notify_channel comes from the
            # engine's own config (trusted), not from event data.
            self.cursor.execute(
                "NOTIFY {}, '{}';".format(
                    self.config['postgres_db']['notify_channel'],
                    ",".join([str(counter) for counter in self.counters]))
            )
            self.connection.commit()
            self.counters = [0 for i in range(0, self.config['events']['thread_pool_size'] + 1)]
            self.tokens -=1

def start(**config):
    '''
    Engine entry point: listen to events on the master event bus and write
    them to the PostgreSQL database (see the module docstring for the
    configuration keys). Runs the event loop forever.
    '''
    io_loop = ZMQDefaultLoop.current()
    event_bus = salt.utils.event.get_master_event(
            __opts__,
            __opts__['sock_dir'],
            listen=True,
            io_loop=io_loop)
    responder = Responder(event_bus, config)
    event_bus.set_event_handler(responder.add_event_to_queue)
    io_loop.start()
  0707010000000D000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001F00000000susemanager-sls/modules/pillar    0707010000000E000081B40000000000000000000000015EA152C400000147000000000000000000000000000000000000002900000000susemanager-sls/modules/pillar/README.md  Overview
========

1. In the "/etc/salt/master" add the following:

   extension_modules: /path/to/the/extension_pillar_modules

2. Copy *.py from this directory to the `extension_modules` directory.

3. Then, in the "/etc/salt/master" add the following:

   ext_pillar:
     - suma_minion: /another/path/with/the/pillar/files
 0707010000000F000081B40000000000000000000000015EA152C400002DD2000000000000000000000000000000000000002E00000000susemanager-sls/modules/pillar/suma_minion.py # -*- coding: utf-8 -*-
'''
Retrieve SUSE Manager pillar data for a minion_id.
- Adds generated and static SUSE Manager pillar data.
- Adds formula pillar data.

.. code-block:: yaml

    ext_pillar:
      - suma_minion: True

'''

# Import python libs
from __future__ import absolute_import
from enum import Enum
import os
import logging
import yaml
import json
import sys
import re
import salt.utils.dictupdate
import salt.utils.stringutils

# SUSE Manager static pillar paths:
MANAGER_STATIC_PILLAR_DATA_PATH = '/usr/share/susemanager/pillar_data'
MANAGER_PILLAR_DATA_PATH = '/srv/susemanager/pillar_data'

# SUSE Manager formulas paths:
MANAGER_FORMULAS_METADATA_MANAGER_PATH = '/usr/share/susemanager/formulas/metadata'
MANAGER_FORMULAS_METADATA_STANDALONE_PATH = '/usr/share/salt-formulas/metadata'
CUSTOM_FORMULAS_METADATA_PATH = '/srv/formula_metadata'
FORMULAS_DATA_PATH = '/srv/susemanager/formula_data'
FORMULA_ORDER_FILE = FORMULAS_DATA_PATH + '/formula_order.json'

# OS images path:
IMAGES_DATA_PATH = os.path.join(MANAGER_PILLAR_DATA_PATH, 'images')

# SUSE Manager static pillar data.
MANAGER_STATIC_PILLAR = [
    'gpgkeys'
]

MANAGER_GLOBAL_PILLAR = [
    'mgr_conf'
]

MINION_PILLAR_FILES_PREFIX = "pillar_{minion_id}"
MINION_PILLAR_FILES_SUFFIXES = [".yml", "_group_memberships.yml"]

CONFIG_FILE = '/etc/rhn/rhn.conf'

# Formula group subtypes
class EditGroupSubtype(Enum):
    '''
    Shape of an edit-group element, derived from its $prototype by
    get_edit_group_subtype() ($key present or not, prototype $type
    "group" or primitive).
    '''
    PRIMITIVE_LIST = "PRIMITIVE_LIST"
    PRIMITIVE_DICTIONARY = "PRIMITIVE_DICTIONARY"
    LIST_OF_DICTIONARIES = "LIST_OF_DICTIONARIES"
    DICTIONARY_OF_DICTIONARIES = "DICTIONARY_OF_DICTIONARIES"

# Set up logging
log = logging.getLogger(__name__)


def __virtual__():
    '''
    Ensure the pillar module name.
    '''
    # Always loadable: this ext_pillar has no external dependencies.
    return True

def ext_pillar(minion_id, *args):
    '''
    Find SUMA-related pillars for the registered minions and return the data.

    :param minion_id: the minion to assemble pillar data for
    :param args: additional ext_pillar arguments (unused)
    :return: the merged pillar dictionary
    '''

    log.debug('Getting pillar data for the minion "{0}"'.format(minion_id))
    ret = {}

    # Including SUSE Manager static pillar data
    for static_pillar in MANAGER_STATIC_PILLAR:
        static_pillar_filename = os.path.join(MANAGER_STATIC_PILLAR_DATA_PATH, static_pillar)
        try:
            # NOTE(review): yaml.load without an explicit Loader is deprecated
            # and unsafe on untrusted input; these files are generated/shipped
            # by SUSE Manager, but yaml.safe_load would be the safer choice.
            ret.update(yaml.load(open('{0}.yml'.format(static_pillar_filename)).read()))
        except Exception as exc:
            log.error('Error accessing "{0}": {1}'.format(static_pillar_filename, exc))

    # Including SUSE Manager global pillar data
    for global_pillar in MANAGER_GLOBAL_PILLAR:
        global_pillar_filename = os.path.join(MANAGER_PILLAR_DATA_PATH, global_pillar)
        try:
            ret.update(yaml.load(open('{0}.yml'.format(global_pillar_filename)).read()))
        except Exception as exc:
            log.error('Error accessing "{0}": {1}'.format(global_pillar_filename, exc))

    # Including generated pillar data for this minion
    # (pillar_<id>.yml and pillar_<id>_group_memberships.yml)
    minion_pillar_filename_prefix = MINION_PILLAR_FILES_PREFIX.format(minion_id=minion_id)
    for suffix in MINION_PILLAR_FILES_SUFFIXES:
        data_filename = os.path.join(MANAGER_PILLAR_DATA_PATH, minion_pillar_filename_prefix + suffix)
        if os.path.exists(data_filename):
            try:
                ret.update(yaml.load(open(data_filename).read()))
            except Exception as error:
                log.error('Error accessing "{pillar_file}": {message}'.format(pillar_file=data_filename, message=str(error)))

    # Including formulas into pillar data; group ids come from the
    # group-membership pillar loaded just above.
    try:
        ret.update(formula_pillars(minion_id, ret.get("group_ids", [])))
    except Exception as error:
        log.error('Error accessing formula pillar data: {message}'.format(message=str(error)))

    # Including images pillar
    try:
        ret.update(image_pillars(minion_id))
    except Exception as error:
        log.error('Error accessing image pillar data: {}'.format(str(error)))

    return ret


def load_formulas_from_file(formula_filename):
    '''
    Read a JSON formulas mapping from FORMULAS_DATA_PATH.

    :param formula_filename: file name relative to FORMULAS_DATA_PATH
    :return: the parsed mapping, or {} when the file is absent or unreadable
    '''
    source_path = os.path.join(FORMULAS_DATA_PATH, formula_filename)
    if not os.path.exists(source_path):
        return {}
    try:
        with open(source_path) as source:
            return json.load(source)
    except Exception as error:
        log.error('Error loading formulas from file: {message}'.format(message=str(error)))
        return {}


def formula_pillars(minion_id, group_ids):
    '''
    Find formula pillars for the minion, merge them and return the data.

    :param minion_id: the minion id
    :param group_ids: ids of the system groups the minion belongs to
    :return: merged pillar dict, including a "formulas" key listing the
        enabled formulas (ordered by formula_order.json when present)
    '''
    pillar = {}
    out_formulas = []

    # Loading group formulas
    data = load_formulas_from_file("group_formulas.json")
    for group in group_ids:
        for formula in data.get(str(group), []):
            formula_utf8 = salt.utils.stringutils.to_str(formula)
            if formula_utf8 in out_formulas:
                continue # already processed
            out_formulas.append(formula_utf8)
            pillar = salt.utils.dictupdate.merge(pillar,
                     load_formula_pillar(minion_id, group, formula),
                     strategy='recurse')

    # Loading minion formulas (group formulas win for duplicates since
    # duplicates are skipped here)
    data = load_formulas_from_file("minion_formulas.json")
    for formula in data.get(str(minion_id), []):
        formula_utf8 = salt.utils.stringutils.to_str(formula)
        if formula_utf8 in out_formulas:
            continue # already processed
        out_formulas.append(formula_utf8)
        pillar = salt.utils.dictupdate.merge(pillar,
                 load_formula_pillar(minion_id, None, formula),
                 strategy='recurse')

    # Loading the formula order: keep only enabled formulas, in the
    # configured order
    if os.path.exists(FORMULA_ORDER_FILE):
        with open(FORMULA_ORDER_FILE) as ofile:
            order = json.load(ofile)
            pillar["formulas"] = list(filter(lambda i: i in out_formulas, order))
    else:
        pillar["formulas"] = out_formulas

    return pillar


def load_formula_pillar(minion_id, group_id, formula_name):
    '''
    Load the data from a specific formula for a minion in a specific group, merge and return it.

    :param minion_id: the minion id
    :param group_id: the system group id, or None for minion-level formulas
    :param formula_name: the formula name
    :return: merged formula data, or {} when no form.yml exists or the data
        cannot be read
    '''
    # form.yml lookup order: standalone formula packages, manager-shipped
    # formulas, then custom formulas under /srv/formula_metadata.
    layout_filename = os.path.join( MANAGER_FORMULAS_METADATA_STANDALONE_PATH, formula_name, "form.yml")
    if not os.path.isfile(layout_filename):
        layout_filename = os.path.join(MANAGER_FORMULAS_METADATA_MANAGER_PATH, formula_name, "form.yml")
        if not os.path.isfile(layout_filename):
            layout_filename = os.path.join(CUSTOM_FORMULAS_METADATA_PATH, formula_name, "form.yml")
            if not os.path.isfile(layout_filename):
                log.error('Error loading data for formula "{formula}": No form.yml found'.format(formula=formula_name))
                return {}

    # Group data only applies when a group id was given.
    group_filename = os.path.join(FORMULAS_DATA_PATH, "group_pillar", "{id}_{name}.json".format(id=group_id, name=formula_name)) if group_id is not None else None
    system_filename = os.path.join(FORMULAS_DATA_PATH, "pillar", "{id}_{name}.json".format(id=minion_id, name=formula_name))

    try:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; consider yaml.safe_load for form.yml files.
        layout = yaml.load(open(layout_filename).read())
        group_data = json.load(open(group_filename)) if group_filename is not None and os.path.isfile(group_filename) else {}
        system_data = json.load(open(system_filename)) if os.path.isfile(system_filename) else {}
    except Exception as error:
        log.error('Error loading data for formula "{formula}": {message}'.format(formula=formula_name, message=str(error)))
        return {}

    merged_data = merge_formula_data(layout, group_data, system_data)
    merged_data = adjust_empty_values(layout, merged_data)
    return merged_data


def merge_formula_data(layout, group_data, system_data, scope="system"):
    '''
    Merge the group and system formula data, respecting the scope of a value.

    :param layout: the form.yml layout describing the elements
    :param group_data: values stored at group level
    :param system_data: values stored at system (minion) level
    :param scope: scope inherited from the enclosing group
    :return: dict of merged values, keyed by element name
    '''
    merged = {}
    for name, spec in layout.items():
        # "$"-prefixed keys are layout directives; non-dict entries are not
        # element specifications.
        if name.startswith("$") or not isinstance(spec, dict):
            continue

        effective_scope = spec.get("$scope", scope)
        fallback = spec.get("$default", spec.get("$placeholder", ""))
        result = None

        if spec.get("$type", "text") in ("group", "hidden-group", "namespace"):
            # Containers recurse with their own sub-dicts and inherited scope.
            result = merge_formula_data(
                spec,
                group_data.get(name, {}),
                system_data.get(name, {}),
                effective_scope,
            )
        # edit-group is handled as a primitive element - use either
        # system_data or group data, no merging
        elif effective_scope == "system":
            result = system_data.get(name, group_data.get(name, fallback))
        elif effective_scope == "group":
            result = group_data.get(name, fallback)
        elif effective_scope == "readonly":
            result = fallback

        merged[name] = result
    return merged


def adjust_empty_values(layout, data):
    '''
    Adjust empty values in formula data.

    Applies $ifEmpty substitutions, recurses into groups and edit-groups,
    and drops empty values of $optional elements.

    :param layout: the form.yml layout describing the elements
    :param data: the merged formula data
    :return: the adjusted data dict
    '''
    adjusted = {}
    for name, spec in layout.items():
        # Skip layout directives and anything that is not an element spec.
        if name.startswith("$") or not isinstance(spec, dict):
            continue

        kind = spec.get("$type", "text")
        value = data.get(name, "")

        if kind in ("group", "hidden-group", "namespace"):
            value = adjust_empty_values(spec, data.get(name, {}))
        elif kind == "edit-group":
            prototype = spec.get("$prototype")
            subtype = get_edit_group_subtype(spec)
            raw = data.get(name)
            if subtype is EditGroupSubtype.DICTIONARY_OF_DICTIONARIES:
                value = {}
                if isinstance(raw, dict):
                    for key, entry in list(raw.items()):
                        value[key] = adjust_empty_values(prototype, entry)
            elif subtype is EditGroupSubtype.LIST_OF_DICTIONARIES:
                value = []
                if isinstance(raw, list):
                    value = [adjust_empty_values(prototype, entry) for entry in raw]

        if not value and '$ifEmpty' in spec:
            value = spec.get("$ifEmpty")

        # Empty optional elements are omitted entirely.
        if value or not spec.get("$optional"):
            adjusted[name] = value
    return adjusted

def get_edit_group_subtype(element):
    '''
    Classify an edit-group element by its $prototype.

    :param element: the edit-group layout element (may be None)
    :return: an EditGroupSubtype member, or None when there is no prototype
    '''
    prototype = element.get("$prototype") if element is not None else None
    if not prototype:
        return None
    has_key = prototype.get("$key") is not None
    if prototype.get("$type", "group") == "group":
        # Dicts of dicts are keyed; lists of dicts are not.
        return (EditGroupSubtype.DICTIONARY_OF_DICTIONARIES if has_key
                else EditGroupSubtype.LIST_OF_DICTIONARIES)
    return (EditGroupSubtype.PRIMITIVE_DICTIONARY if has_key
            else EditGroupSubtype.PRIMITIVE_LIST)

def image_pillars(minion_id):
    '''
    Load image pillars

    Image pillars are automatically created after image build and are available to all minions

    :param minion_id: unused; kept for symmetry with the other pillar loaders
    :return: merged pillar data from all *.sls files in IMAGES_DATA_PATH
    '''
    ret = {}
    # Robustness: no image has been built yet -> no images directory.
    if not os.path.isdir(IMAGES_DATA_PATH):
        return ret
    for pillar in os.listdir(IMAGES_DATA_PATH):
        pillar_path = os.path.join(IMAGES_DATA_PATH, pillar)
        if os.path.isfile(pillar_path) and pillar.endswith('.sls'):
            try:
                with open(pillar_path) as p:
                    # NOTE(review): yaml.load without an explicit Loader is
                    # unsafe on untrusted input; these files are generated by
                    # SUSE Manager, but consider yaml.safe_load.
                    ret = salt.utils.dictupdate.merge(ret, yaml.load(p.read()), strategy='recurse')
            except Exception as error:
                # Fix: `pillar` is a filename string; the previous
                # `pillar.path()` raised AttributeError inside this handler,
                # masking the original error.
                log.error('Error loading data for image "{image}": {message}'.format(image=pillar, message=str(error)))

    return ret

  07070100000010000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000002000000000susemanager-sls/modules/runners   07070100000011000081B40000000000000000000000015EA152C4000005BD000000000000000000000000000000000000003600000000susemanager-sls/modules/runners/kiwi-image-collect.py # SUSE Manager
# Copyright (c) 2018,2019 SUSE LLC

# runner to collect image from build host

import os
import salt.exceptions
import logging

log = logging.getLogger(__name__)

def upload_file_from_minion(minion, filetoupload, targetdir):
    '''
    Pull a file from a minion into targetdir via rsync over salt-ssh's key.

    :param minion: minion hostname to pull from (as root)
    :param filetoupload: remote path of the file
    :param targetdir: local destination directory
    :raises ConnectionError: when the rsync transfer fails
    :return: the rsync.rsync result map
    '''
    remote_source = 'root@' + minion + ':' + filetoupload
    result = __salt__['salt.cmd'](
        'rsync.rsync',
        remote_source,
        targetdir,
        rsh='ssh -o IdentityFile=/srv/susemanager/salt/salt_ssh/mgr_ssh_id -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
    )
    if result['retcode'] != 0:
        raise ConnectionError('Failed to transfer image from minion {}: {}'.format(minion, result['stderr']))
    return result

def move_file_from_minion_cache(minion, filetomove, targetdir):
    '''
    Move a file the minion pushed into the master's file cache to targetdir.

    :param minion: minion id whose cache directory is read
    :param filetomove: path as uploaded by the minion
    :param targetdir: destination directory
    :return: the file.move result map
    '''
    cached_file = os.path.join(
        __opts__['cachedir'], 'minions', minion, 'files', filetomove.lstrip('/'))
    # file.move throws an exception in case of error
    return __salt__['salt.cmd']('file.move', cached_file, targetdir)

def kiwi_collect_image(minion, filepath, image_store_dir):
    '''
    Fetch a built Kiwi image from the build host into the image store,
    selecting the transport from the minion's pillar configuration.

    :param minion: the build-host minion id
    :param filepath: image path on the build host
    :param image_store_dir: local destination directory (created if missing)
    :return: the result map of the chosen transfer function
    '''
    __salt__['salt.cmd']('file.mkdir', image_store_dir)

    minion_pillars = list(__salt__['cache.pillar'](tgt=minion).values())[0]
    if minion_pillars.get('use_salt_transport'):
        log.info('Collecting image "{}" from minion cache'.format(filepath))
        return move_file_from_minion_cache(minion, filepath, image_store_dir)

    log.info('Collecting image "{}" from minion using rsync'.format(filepath))
    return upload_file_from_minion(minion, filepath, image_store_dir)
   07070100000012000081B40000000000000000000000015EA152C400000757000000000000000000000000000000000000002A00000000susemanager-sls/modules/runners/mgrk8s.py from salt.exceptions import SaltInvocationError

try:
    from kubernetes import client, config # pylint: disable=import-self
    from kubernetes.config import new_client_from_config
    from kubernetes.client.rest import ApiException
    from urllib3.exceptions import HTTPError
    IS_VALID = True
except ImportError as ex:
    IS_VALID = False


def __virtual__():
    # Only load this runner when the kubernetes/urllib3 imports succeeded.
    return IS_VALID


def get_all_containers(kubeconfig=None, context=None):
    '''
    Retrieve information about all containers running in a Kubernetes cluster.

    :param kubeconfig: path to kubeconfig file
    :param context: context inside kubeconfig
    :raises SaltInvocationError: when kubeconfig or context is missing
    :return:
    .. code-block:: json
       {
            "containers": [
                {
                    "image_id": "(docker-pullable://)?some/image@sha256:hash....",
                    "image": "myregistry/some/image:v1",
                    "container_id": "(docker|cri-o)://...hash...",
                    "pod_name": "kubernetes-pod",
                    "pod_namespace": "pod-namespace"
                }
       }
    '''
    if not kubeconfig:
        raise SaltInvocationError('kubeconfig is mandatory')

    if not context:
        raise SaltInvocationError('context is mandatory')

    api_client = new_client_from_config(kubeconfig, context)
    api = client.CoreV1Api(api_client)
    pods = api.list_pod_for_all_namespaces(watch=False)
    output = dict(containers=[])
    for pod in pods.items:
        # Fix: container_statuses is None for pods with no started containers
        # yet (e.g. Pending pods); iterating None raised TypeError.
        for container in pod.status.container_statuses or []:
            res_cont = dict()
            res_cont['container_id'] = container.container_id
            res_cont['image'] = container.image
            res_cont['image_id'] = container.image_id
            res_cont['pod_name'] = pod.metadata.name
            res_cont['pod_namespace'] = pod.metadata.namespace
            output['containers'].append(res_cont)

    return output
 07070100000013000081B40000000000000000000000015EA152C4000010A8000000000000000000000000000000000000002B00000000susemanager-sls/modules/runners/mgrutil.py    from subprocess import Popen, PIPE
import logging
import stat
import grp
import shlex
import os
import shutil
import salt.utils

log = logging.getLogger(__name__)

GROUP_OWNER = 'susemanager'


def delete_rejected_key(minion):
    '''
    Delete a previously rejected minion key from minions_rejected
    :param minion: the minion id to look for
    :return: map containing returncode and stdout/stderr
    '''
    rejected_dir = "/etc/salt/pki/master/minions_rejected/"
    candidate = os.path.normpath(rejected_dir + minion)
    # Refuse anything escaping the rejected-keys directory (path traversal).
    if not candidate.startswith(rejected_dir):
        return {"returncode": -1, "stderr": "Unexpected path: " + candidate}
    if not os.path.isfile(candidate):
        # Nothing to delete; report success.
        return {"returncode": 0}
    return _cmd(['rm', candidate])


def ssh_keygen(path):
    '''
    Generate SSH keys using the given path.
    :param path: the path
    :return: map containing returncode and stdout/stderr
    '''
    if os.path.isfile(path):
        return {"returncode": -1, "stderr": "Key file already exists"}
    # RSA key pair, empty passphrase (-N ''), quiet; the parent directory
    # must already exist.
    keygen_cmd = ['ssh-keygen', '-N', '', '-f', path, '-t', 'rsa', '-q']
    return _cmd(keygen_cmd)


def chain_ssh_cmd(hosts=None, clientkey=None, proxykey=None, user="root", options=None, command=None, outputfile=None):
    '''
    Chain ssh calls over one or more hops to run a command on the last host in the chain.

    :param hosts: ordered list of hostnames to hop through (effectively
        required; NOTE(review): the default None raises TypeError when iterated)
    :param clientkey: identity file for the first hop
    :param proxykey: identity file for every subsequent hop
    :param user: remote user used on every hop
    :param options: dict of ssh -o options applied to every hop (effectively
        required; None would raise on .items())
    :param command: command to run on the final host (appended as one argument)
    :param outputfile: if given, the command's stdout is also written there
    :return: map containing returncode and stdout/stderr
    '''
    cmd = []
    for idx, hostname in enumerate(hosts):
        # First hop authenticates with the client key, later hops with the
        # proxy key.
        key = clientkey if idx == 0 else proxykey
        opts = " ".join(["-o {}={}".format(opt, val) for opt, val in list(options.items())])
        ssh = "/usr/bin/ssh -i {} {} -o User={} {}"\
            .format(key, opts, user, hostname)
        cmd.extend(shlex.split(ssh))
    cmd.append(command)
    ret = _cmd(cmd)
    if outputfile:
        with open(outputfile, "w") as out:
            out.write(ret["stdout"])
    return ret


def _cmd(cmd):
    '''
    Run a command and capture its decoded output.
    :param cmd: argv list for subprocess.Popen
    :return: map containing returncode and stdout/stderr (unicode)
    '''
    # NOTE(review): this relies on salt.utils.stringutils being reachable via
    # the top-level `import salt.utils`; an explicit
    # `import salt.utils.stringutils` would be safer.
    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
    stdout, stderr = p.communicate()
    return {"returncode": p.returncode, "stdout": salt.utils.stringutils.to_unicode(stdout), "stderr": salt.utils.stringutils.to_unicode(stderr)}


def move_minion_uploaded_files(minion=None, dirtomove=None, basepath=None, actionpath=None):
    '''
    Move files a minion uploaded into the master's cache to the SCAP store
    directory, fixing group ownership to the susemanager group.

    :param minion: minion id whose cache directory is read
    :param dirtomove: path (as uploaded by the minion) below the cache
    :param basepath: base directory of the store
    :param actionpath: action-specific subdirectory below basepath
    :return: {True: destination path} on success, {False: message} on failure
    '''
    srcdir = os.path.join(__opts__['cachedir'], "minions", minion, 'files', dirtomove.lstrip('/'))
    scapstorepath = os.path.join(basepath, actionpath)
    susemanager_gid = grp.getgrnam(GROUP_OWNER).gr_gid
    if not os.path.exists(scapstorepath):
        log.debug("Creating action directory: {0}".format(scapstorepath))
        try:
            os.makedirs(scapstorepath)
        except Exception as err:
            log.error('Failed to create dir {0}'.format(scapstorepath), exc_info=True)
            return {False: 'Salt failed to create dir {0}: {1}'.format(scapstorepath, str(err))}
        # change group permissions to rwx and group owner to susemanager
        mode = stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH
        subdirs = actionpath.split('/')
        for idx in range(1, len(subdirs)):
            # NOTE(review): `subdirs[0: idx] != ''` compares a list with a
            # string and is therefore always True — presumably meant to skip
            # empty path components; confirm the intended condition.
            if subdirs[0: idx] != '':
                # ignore errors. If dir has owner != salt then chmod fails but the dir
                # might still have the correct group owner
                try:
                    os.chmod(os.path.join(basepath, *subdirs[0: idx]), mode)
                except OSError:
                    pass
                try:
                    os.chown(os.path.join(basepath, *subdirs[0: idx]), -1, susemanager_gid)
                except OSError:
                    pass

    try:
        # move the files to the scap store dir
        for fl in os.listdir(srcdir):
            shutil.move(os.path.join(srcdir, fl), scapstorepath)
        # change group owner to susemanager
        for fl in os.listdir(scapstorepath):
            os.chown(os.path.join(scapstorepath, fl), -1, susemanager_gid)
    except Exception as err:
        log.error('Salt failed to move {0} -> {1}'.format(srcdir, scapstorepath), exc_info=True)
        return {False: str(err)}
    return {True: scapstorepath}

07070100000014000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001D00000000susemanager-sls/modules/tops  07070100000015000081B40000000000000000000000015EA152C4000004E4000000000000000000000000000000000000003000000000susemanager-sls/modules/tops/mgr_master_tops.py   # -*- coding: utf-8 -*-
'''
SUSE Manager master_tops module
-------------------------------

This module provides the base states top information from SUSE Manager.

The top information returned by this module is merged by Salt with the
user custom data provided in /srv/salt/top.sls file.

.. code-block:: yaml

    master_tops:
      mgr_master_tops: True
'''

# Import python libs
from __future__ import absolute_import
import logging

# Define the module's virtual name
__virtualname__ = 'mgr_master_tops'

log = logging.getLogger(__name__)

MANAGER_BASE_TOP = [
    "channels",
    "certs",
    "packages",
    "custom",
    "custom_groups",
    "custom_org",
    "formulas",
    "services.salt-minion",
    "services.docker",
    "services.kiwi-image-server"
]


def __virtual__():
    '''
    Ensure the module name.
    '''
    return __virtualname__


def top(**kwargs):
    '''
    Returns the SUSE Manager top state information of a minion
    for the `base` salt environment.
    '''
    env = kwargs['opts'].get('environment') or kwargs['opts'].get('saltenv')
    if env in [None, "base"]:
        log.debug('Loading SUSE Manager TOP state information for the "base" environment')
        return {"base": MANAGER_BASE_TOP}
    return None
07070100000016000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001C00000000susemanager-sls/pillar_data   07070100000017000081B40000000000000000000000015EA152C4000001A2000000000000000000000000000000000000002800000000susemanager-sls/pillar_data/gpgkeys.yml   gpgkeys:
  res6tools:
    name: gpg-pubkey-307e3d54
    file: sle11-gpg-pubkey-307e3d54.key
  res7tools:
    name: gpg-pubkey-39db7c82
    file: sle12-gpg-pubkey-39db7c82.key
  res8tools:
    name: gpg-pubkey-39db7c82
    file: sle12-gpg-pubkey-39db7c82.key
  res:
    name: gpg-pubkey-0182b964
    file: res-gpg-pubkey-0182b964.key
  ubuntutools:
    name: gpg-pubkey-39db7c82
    file: sle12-gpg-pubkey-39db7c82.key
  07070100000018000081B40000000000000000000000015EA152C40000139D000000000000000000000000000000000000001900000000susemanager-sls/pylintrc  # susemanager-sls package pylint configuration

[MASTER]

# Profiled execution.
profile=no

# Pickle collected data for later comparisons.
persistent=no


[MESSAGES CONTROL]

# Disable the message(s) with the given id(s).


disable=I0011,
	C0302,
	C0111,
	R0801,
	R0902,
	R0903,
	R0904,
	R0912,
	R0913,
	R0914,
	R0915,
	R0921,
	R0922,
	W0142,
	W0403,
	W0603,
	C1001,
	W0121,
	useless-else-on-loop,
	bad-whitespace,
	unpacking-non-sequence,
	superfluous-parens,
	cyclic-import,
	redefined-variable-type,
	no-else-return,

        # Uyuni disabled
	E0203,
	E0611,
	E1101,
	E1102

# list of disabled messages:
#I0011: 62: Locally disabling R0201
#C0302:  1: Too many lines in module (2425)
#C0111:  1: Missing docstring
#R0902: 19:RequestedChannels: Too many instance attributes (9/7)
#R0903:  Too few public methods
#R0904: 26:Transport: Too many public methods (22/20)
#R0912:171:set_slots_from_cert: Too many branches (59/20)
#R0913:101:GETServer.__init__: Too many arguments (11/10)
#R0914:171:set_slots_from_cert: Too many local variables (38/20)
#R0915:171:set_slots_from_cert: Too many statements (169/50)
#W0142:228:MPM_Package.write: Used * or ** magic
#W0403: 28: Relative import 'rhnLog', should be 'backend.common.rhnLog'
#W0603: 72:initLOG: Using the global statement
# for pylint-1.0 we also disable
#C1001: 46, 0: Old-style class defined. (old-style-class)
#W0121: 33,16: Use raise ErrorClass(args) instead of raise ErrorClass, args. (old-raise-syntax)
#W:243, 8: Else clause on loop without a break statement (useless-else-on-loop)
# pylint-1.1 checks
#C:334, 0: No space allowed after bracket (bad-whitespace)
#W:162, 8: Attempting to unpack a non-sequence defined at line 6 of (unpacking-non-sequence)
#C: 37, 0: Unnecessary parens after 'not' keyword (superfluous-parens)
#C:301, 0: Unnecessary parens after 'if' keyword (superfluous-parens)

[REPORTS]

# Set the output format. Available formats are text, parseable, colorized, msvs
# (visual studio) and html
output-format=parseable

# Include message's id in output
include-ids=yes

# Tells whether to display a full report or only the messages
reports=yes

# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
msg-template="{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}"

[VARIABLES]

# A regular expression matching names used for dummy variables (i.e. not used).
dummy-variables-rgx=_|dummy


[BASIC]

# Regular expression which should only match correct module names
#module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
module-rgx=([a-zA-Z_][a-zA-Z0-9_]+)$

# Regular expression which should only match correct module level names
const-rgx=(([a-zA-Z_][a-zA-Z0-9_]*)|(__.*__))$

# Regular expression which should only match correct class names
class-rgx=[a-zA-Z_][a-zA-Z0-9_]+$

# Regular expression which should only match correct function names
function-rgx=[a-z_][a-zA-Z0-9_]{,42}$

# Regular expression which should only match correct method names
method-rgx=[a-z_][a-zA-Z0-9_]{,42}$

# Regular expression which should only match correct instance attribute names
attr-rgx=[a-z_][a-zA-Z0-9_]{,30}$

# Regular expression which should only match correct argument names
argument-rgx=[a-z_][a-zA-Z0-9_]{,30}$

# Regular expression which should only match correct variable names
variable-rgx=[a-z_][a-zA-Z0-9_]{,30}$

# Regular expression which should only match correct list comprehension /
# generator expression variable names
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$

# Regular expression which should only match correct class sttribute names
class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,42}|(__.*__))$

# Good variable names which should always be accepted, separated by a comma
good-names=i,j,k,ex,Run,_

# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata

# List of builtins function names that should not be used, separated by a comma
bad-functions=apply,input


[DESIGN]

# Maximum number of arguments for function / method
max-args=10

# Maximum number of locals for function / method body
max-locals=20

# Maximum number of return / yield for function / method body
max-returns=6

# Maximum number of branch for function / method body
max-branchs=20

# Maximum number of statements in function / method body
max-statements=50

# Maximum number of parents for a class (see R0901).
max-parents=7

# Maximum number of attributes for a class (see R0902).
max-attributes=7

# Minimum number of public methods for a class (see R0903).
min-public-methods=1

# Maximum number of public methods for a class (see R0904).
max-public-methods=20


[CLASSES]


[FORMAT]

# Maximum number of characters on a single line.
max-line-length=120

# Maximum number of lines in a module
max-module-lines=1000

# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string='    '


[MISCELLANEOUS]

# List of note tags to take in consideration, separated by a comma.
notes=
   07070100000019000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001800000000susemanager-sls/reactor   0707010000001A000081B40000000000000000000000015EA152C40000008A000000000000000000000000000000000000003000000000susemanager-sls/reactor/resume_action_chain.sls   resume_actionchain_execution:
  local.mgractionchains.resume:
    - tgt: {{ data['id'] }}
    - metadata:
        suma-action-chain: True
  0707010000001B000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001500000000susemanager-sls/salt  0707010000001C000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000002200000000susemanager-sls/salt/actionchains 0707010000001D000081B40000000000000000000000015EA152C40000010C000000000000000000000000000000000000003000000000susemanager-sls/salt/actionchains/resumessh.sls   resumessh:
    module.run:
    -   name: mgractionchains.resume
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_modules
{%- else %}
      - module: sync_modules
{%- endif %}

include:
  - util.synccustomall
0707010000001E000081B40000000000000000000000015EA152C400000144000000000000000000000000000000000000002F00000000susemanager-sls/salt/actionchains/startssh.sls    startssh:
    module.run:
    -   name: mgractionchains.start
    -   actionchain_id: {{ pillar.get('actionchain_id')}}
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_modules
{%- else %}
      - module: sync_modules
{%- endif %}

include:
  - util.synccustomall
0707010000001F000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000002000000000susemanager-sls/salt/bootloader   07070100000020000081FD0000000000000000000000015EA152C4000001CC000000000000000000000000000000000000003900000000susemanager-sls/salt/bootloader/42_uyuni_reinstall.templ  #!/bin/sh

echo "menuentry \"{{ pillar.get('uyuni-reinstall-name') }}\" {"
if [ -d /sys/firmware/efi ] && [ "x${GRUB_USE_LINUXEFI}" = "xtrue" ]; then
    echo "    linuxefi /boot/uyuni-reinstall-kernel {{ pillar.get('uyuni-reinstall-kopts') }}"
    echo "    initrdefi /boot/uyuni-reinstall-initrd"
else
    echo "    linux /boot/uyuni-reinstall-kernel {{ pillar.get('uyuni-reinstall-kopts') }}"
    echo "    initrd /boot/uyuni-reinstall-initrd"
fi
echo "}"

07070100000021000081B40000000000000000000000015EA152C400000472000000000000000000000000000000000000003000000000susemanager-sls/salt/bootloader/autoinstall.sls   {% if pillar['uyuni-reinstall-kernel'] and pillar['uyuni-reinstall-initrd'] %}
mgr_copy_kernel:
  file.managed:
    - name: /boot/uyuni-reinstall-kernel
    - source: salt://bootloader/{{ pillar.get('uyuni-reinstall-kernel') }}

mgr_copy_initrd:
  file.managed:
    - name: /boot/uyuni-reinstall-initrd
    - source: salt://bootloader/{{ pillar.get('uyuni-reinstall-initrd') }}

mgr_create_grub2_entry:
  file.managed:
    - name: /etc/grub.d/42_uyuni_reinstall
    - source: salt://bootloader/42_uyuni_reinstall.templ
    - template: jinja
    - mode: 0755

mgr_set_default_boot:
  file.replace:
    - name: /etc/default/grub
    - pattern: GRUB_DEFAULT=.*
    - repl: GRUB_DEFAULT={{ pillar.get('uyuni-reinstall-name') }}
    - require:
      - file: mgr_create_grub2_entry

mgr_generate_grubconf:
  cmd.run:
    - name: grub2-mkconfig -o /boot/grub2/grub.cfg
    - onchanges:
      - file: mgr_copy_kernel
      - file: mgr_copy_initrd
      - file: mgr_create_grub2_entry
      - file: mgr_set_default_boot

mgr_autoinstall_start:
  cmd.run:
    - name: shutdown -r +1
    - onchanges:
      - cmd: mgr_generate_grubconf

{% endif %}
  07070100000022000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001F00000000susemanager-sls/salt/bootstrap    07070100000023000081B40000000000000000000000015EA152C4000001D1000000000000000000000000000000000000002E00000000susemanager-sls/salt/bootstrap/bootstrap.repo # SUSE Manager bootstrap repository
# Do not edit this file, changes will be overwritten
{%- if grains['os_family'] == 'Debian' %}
deb [trusted=yes] {{bootstrap_repo_url}} bootstrap main
{%- else %}
[SUSE-Manager-Bootstrap]
name=SUSE-Manager-Bootstrap
type=rpm-md
baseurl={{bootstrap_repo_url}}
gpgcheck=0
enabled=1
autorefresh=1
keeppackages=0
{%- if grains['os_family'] == 'RedHat' and grains['osmajorrelease'] >= 8 %}
module_hotfixes=1
{%- endif %}
{%- endif %}
   07070100000024000081B40000000000000000000000015EA152C400001BDF000000000000000000000000000000000000002800000000susemanager-sls/salt/bootstrap/init.sls   # Make sure no SUSE Manager server aliasing left over from ssh-push via tunnel
mgr_server_localhost_alias_absent:
  host.absent:
    - ip:
      - 127.0.0.1
    - names:
      - {{ salt['pillar.get']('mgr_server') }}

# disable all susemanager:* repos
{% set repos_disabled = {'match_str': 'susemanager:', 'matching': true} %}
{%- include 'channels/disablelocalrepos.sls' %}

{% set os_base = 'sle' %}
# CentOS6 oscodename is bogus
{%- if "centos" in grains['os']|lower %}
{% set os_base = 'centos' %}
{%- elif "redhat" in grains['os']|lower %}
{% set os_base = 'res' %}
{%- elif "opensuse" in grains['oscodename']|lower %}
{% set os_base = 'opensuse' %}
{%- endif %}

{%- if grains['os_family'] == 'Suse' %}
{%- if "." in grains['osrelease'] %}
{% set bootstrap_repo_url = 'https://' ~ salt['pillar.get']('mgr_server') ~ '/pub/repositories/' ~ os_base ~ '/' ~ grains['osrelease'].replace('.', '/') ~ '/bootstrap/' %}
{%- else %}
{% set bootstrap_repo_url = 'https://' ~ salt['pillar.get']('mgr_server') ~ '/pub/repositories/' ~ os_base ~ '/' ~ grains['osrelease'] ~ '/0/bootstrap/' %}
{%- endif %}
{%- elif grains['os_family'] == 'RedHat' %}
{% if salt['file.file_exists' ]('/etc/redhat-release') %}
{% set bootstrap_repo_url = 'https://' ~ salt['pillar.get']('mgr_server') ~ '/pub/repositories/res/' ~ grains['osmajorrelease'] ~ '/bootstrap/' %}
{%- else %}
{% set bootstrap_repo_url = 'https://' ~ salt['pillar.get']('mgr_server') ~ '/pub/repositories/' ~ os_base ~ '/' ~ grains['osmajorrelease'] ~ '/bootstrap/' %}
{% endif %}
{%- elif grains['os_family'] == 'Debian' %}
{%- set osrelease = grains['osrelease'].split('.') %}
{%- if grains['os'] == 'Ubuntu' %}
{% set bootstrap_repo_url = 'https://' ~ salt['pillar.get']('mgr_server') ~ '/pub/repositories/ubuntu/' ~ osrelease[0] ~ '/' ~ osrelease[1].lstrip('0') ~ '/bootstrap/' %}
{%- else %}
{% set bootstrap_repo_url = 'https://' ~ salt['pillar.get']('mgr_server') ~ '/pub/repositories/debian/' ~ osrelease[0] ~ '/' ~ osrelease[1].lstrip('0') ~ '/bootstrap/' %}
{%- endif %}
{%- endif %}

{%- if not grains['os_family'] == 'Debian' %}

{%- set bootstrap_repo_request = salt['http.query'](bootstrap_repo_url + 'repodata/repomd.xml', status=True, verify_ssl=False) %}
{# 901 is a special status code for the TLS issue with RHEL6 and SLE11. #}
{%- if bootstrap_repo_request['status'] == 901 %}
{{ raise(bootstrap_repo_request['error']) }}
{%- endif %}
{%- set bootstrap_repo_exists = (0 < bootstrap_repo_request['status'] < 300) %}

bootstrap_repo:
  file.managed:
{%- if grains['os_family'] == 'Suse' %}
    - name: /etc/zypp/repos.d/susemanager:bootstrap.repo
{%- elif grains['os_family'] == 'RedHat' %}
    - name: /etc/yum.repos.d/susemanager:bootstrap.repo
{%- endif %}
    - source:
      - salt://bootstrap/bootstrap.repo
    - template: jinja
    - context:
      bootstrap_repo_url: {{bootstrap_repo_url}}
    - mode: 644
    - require:
      - host: mgr_server_localhost_alias_absent
{%- if repos_disabled.count > 0 %}
      - module: disable_repo_*
{%- endif %}
    - onlyif:
      - ([ {{ bootstrap_repo_exists }} = "True" ])

{%- else %}
{%- set bootstrap_repo_exists = (0 < salt['http.query'](bootstrap_repo_url + 'dists/bootstrap/Release', status=True, verify_ssl=False)['status'] < 300) %}
bootstrap_repo:
  file.managed:
    - name: /etc/apt/sources.list.d/susemanager_bootstrap.list
    - source:
      - salt://bootstrap/bootstrap.repo
    - template: jinja
    - context:
      bootstrap_repo_url: {{bootstrap_repo_url}}
    - mode: 644
    - require:
      - host: mgr_server_localhost_alias_absent
{%- if repos_disabled.count > 0 %}
      - module: disable_repo_*
{%- endif %}
    - onlyif:
      - ([ {{ bootstrap_repo_exists }} = "True" ])
{%- endif %}

{%- if grains['os_family'] == 'RedHat' %}
trust_suse_manager_tools_rhel_gpg_key:
  cmd.run:
{%- if grains['osmajorrelease']|int == 6 %}
    - name: rpm --import https://{{ salt['pillar.get']('mgr_server') }}/pub/{{ salt['pillar.get']('gpgkeys:res6tools:file') }}
    - unless: rpm -q {{ salt['pillar.get']('gpgkeys:res6tools:name') }}
{%- elif grains['osmajorrelease']|int == 7 %}
    - name: rpm --import https://{{ salt['pillar.get']('mgr_server') }}/pub/{{ salt['pillar.get']('gpgkeys:res7tools:file') }}
    - unless: rpm -q {{ salt['pillar.get']('gpgkeys:res7tools:name') }}
{%- elif grains['osmajorrelease']|int == 8 %}
    - name: rpm --import https://{{ salt['pillar.get']('mgr_server') }}/pub/{{ salt['pillar.get']('gpgkeys:res8tools:file') }}
    - unless: rpm -q {{ salt['pillar.get']('gpgkeys:res8tools:name') }}
{% else %}
    - name: /usr/bin/true
{%- endif %}
    - runas: root

trust_res_gpg_key:
  cmd.run:
    - name: rpm --import https://{{ salt['pillar.get']('mgr_server') }}/pub/{{ salt['pillar.get']('gpgkeys:res:file') }}
    - unless: rpm -q {{ salt['pillar.get']('gpgkeys:res:name') }}
    - runas: root

{%- elif grains['os_family'] == 'Debian' %}
{%- include 'channels/debiankeyring.sls' %}
trust_suse_manager_tools_deb_gpg_key:
  module.run:
    - name: pkg.add_repo_key
    - path: https://{{ salt['pillar.get']('mgr_server') }}/pub/{{ salt['pillar.get']('gpgkeys:ubuntutools:file') }}
{%- endif %}

salt-minion-package:
  pkg.installed:
    - name: salt-minion
    - require:
      - file: bootstrap_repo

/etc/salt/minion.d/susemanager.conf:
  file.managed:
    - source:
      - salt://bootstrap/susemanager.conf
    - template: jinja
    - mode: 644
    - require:
      - pkg: salt-minion-package

/etc/salt/minion_id:
  file.managed:
    - contents_pillar: minion_id
    - require:
      - pkg: salt-minion-package

include:
  - bootstrap.remove_traditional_stack

mgr_update_basic_pkgs:
  pkg.latest:
    - pkgs:
      - openssl
{%- if grains['os_family'] == 'Suse' and grains['osrelease'] in ['11.3', '11.4'] and grains['cpuarch'] in ['i586', 'x86_64'] %}
      - pmtools
{%- elif grains['cpuarch'] in ['aarch64', 'x86_64'] %}
      - dmidecode
{%- endif %}
{%- if grains['os_family'] == 'Suse' %}
      - zypper
{%- elif grains['os_family'] == 'RedHat' %}
      - yum
{%- endif %}

# Manage minion key files in case they are provided in the pillar
{% if pillar['minion_pub'] is defined and pillar['minion_pem'] is defined %}
/etc/salt/pki/minion/minion.pub:
  file.managed:
    - contents_pillar: minion_pub
    - mode: 644
    - makedirs: True
    - require:
      - pkg: salt-minion-package

/etc/salt/pki/minion/minion.pem:
  file.managed:
    - contents_pillar: minion_pem
    - mode: 400
    - makedirs: True
    - require:
      - pkg: salt-minion-package

salt-minion:
  service.running:
    - enable: True
    - require:
      - pkg: salt-minion-package
      - host: mgr_server_localhost_alias_absent
    - watch:
      - file: /etc/salt/minion_id
      - file: /etc/salt/pki/minion/minion.pem
      - file: /etc/salt/pki/minion/minion.pub
      - file: /etc/salt/minion.d/susemanager.conf
{% else %}
salt-minion:
  service.running:
    - enable: True
    - require:
      - pkg: salt-minion-package
      - host: mgr_server_localhost_alias_absent
    - watch:
      - file: /etc/salt/minion_id
      - file: /etc/salt/minion.d/susemanager.conf
{% endif %}
 07070100000025000081B40000000000000000000000015EA152C400000784000000000000000000000000000000000000003C00000000susemanager-sls/salt/bootstrap/remove_traditional_stack.sls   # disable all spacewalk:* repos
{% set repos_disabled = {'match_str': 'spacewalk:', 'matching': true} %}
{%- include 'channels/disablelocalrepos.sls' %}

disable_spacewalksd:
  service.dead:
    - name: rhnsd
    - enable: False

disable_spacewalk-update-status:
  service.dead:
    - name: spacewalk-update-status
    - enable: False

disable_osad:
  service.dead:
    - name: osad
    - enable: False

remove_traditional_stack_all:
  pkg.removed:
    - pkgs:
      - spacewalk-check
      - spacewalk-client-setup
      - osad
      - osa-common
      - mgr-osad
      - spacewalksd
      - mgr-daemon
      - rhnlib
      - rhnmd
{%- if grains['os_family'] == 'Suse' %}
      - zypp-plugin-spacewalk
{%- elif grains['os_family'] == 'RedHat' %}
      - yum-rhn-plugin
      - rhnsd
      - rhn-check
      - rhn-setup
      - rhn-client-tools
{%- elif grains['os_family'] == 'Debian' %}
      - apt-transport-spacewalk
{%- endif %}
{%- if repos_disabled.count > 0 %}
    - require:
      - module: disable_repo*
{%- endif %}

remove_traditional_stack:
  pkg.removed:
    - pkgs:
      - spacewalk-client-tools
      - rhncfg
      - mgr-cfg
{%- if grains['os_family'] == 'Suse' %}
      - suseRegisterInfo
{%- endif %}
{%- if repos_disabled.count > 0 %}
    - require:
      - module: disable_repo*
{%- endif %}
    - unless: rpm -q spacewalk-proxy-common || rpm -q spacewalk-common

# only removing apt-transport-spacewalk above
# causes apt-get update to 'freeze' if this
# file is still present and referencing a
# method not present anymore.
{%- if grains['os_family'] == 'Debian' %}
remove_spacewalk_sources:
  file.absent:
    - name: /etc/apt/sources.list.d/spacewalk.list
{%- endif %}

# Remove suseRegisterInfo in a separate yum transaction to avoid being called by
# the yum plugin.
{%- if grains['os_family'] == 'RedHat' %}
remove_suse_register_info_rh:
  pkg.removed:
    - name: suseRegisterInfo
{%- endif %}
07070100000026000081B40000000000000000000000015EA152C400000171000000000000000000000000000000000000003000000000susemanager-sls/salt/bootstrap/susemanager.conf   # This file was generated by SUSE Manager
master: {{ pillar['mgr_server'] }}
server_id_use_crc: adler32
enable_legacy_startup_events: False
enable_fqdns_grains: False
{% if pillar['activation_key'] is defined %}
grains:
  susemanager:
    activation_key: {{ pillar['activation_key'] }}
{% endif %}
start_event_grains:
  - machine_id
  - saltboot_initrd
  - susemanager
   07070100000027000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001B00000000susemanager-sls/salt/certs    070701000000280000A1FF0000000000000000000000015EA152C40000000A000000000000000000000000000000000000002600000000susemanager-sls/salt/certs/CAASP1.sls SLES12.sls  070701000000290000A1FF0000000000000000000000015EA152C40000000B000000000000000000000000000000000000002700000000susemanager-sls/salt/certs/CentOS6.sls    RedHat6.sls 0707010000002A0000A1FF0000000000000000000000015EA152C40000000B000000000000000000000000000000000000002700000000susemanager-sls/salt/certs/CentOS7.sls    RedHat7.sls 0707010000002B0000A1FF0000000000000000000000015EA152C40000000B000000000000000000000000000000000000002700000000susemanager-sls/salt/certs/CentOS8.sls    RedHat7.sls 0707010000002C000081B40000000000000000000000015EA152C400000197000000000000000000000000000000000000002700000000susemanager-sls/salt/certs/Debian9.sls    mgr_download_mgr_cert:
  file.managed:
    - name: /usr/local/share/ca-certificates/susemanager/RHN-ORG-TRUSTED-SSL-CERT.crt
    - makedirs: True
    - source:
      - salt://certs/RHN-ORG-TRUSTED-SSL-CERT

mgr_update_ca_certs:
  cmd.run:
    - name: /usr/sbin/update-ca-certificates
    - runas: root
    - onchanges:
      - file: /usr/local/share/ca-certificates/susemanager/RHN-ORG-TRUSTED-SSL-CERT.crt
 0707010000002D0000A1FF0000000000000000000000015EA152C40000000A000000000000000000000000000000000000002800000000susemanager-sls/salt/certs/Leap15_0.sls   SLES12.sls  0707010000002E0000A1FF0000000000000000000000015EA152C40000000A000000000000000000000000000000000000002800000000susemanager-sls/salt/certs/Leap15_1.sls   SLES12.sls  0707010000002F0000A1FF0000000000000000000000015EA152C40000000A000000000000000000000000000000000000002800000000susemanager-sls/salt/certs/Leap42_2.sls   SLES12.sls  070701000000300000A1FF0000000000000000000000015EA152C40000000A000000000000000000000000000000000000002800000000susemanager-sls/salt/certs/Leap42_3.sls   SLES12.sls  070701000000310000A1FF0000000000000000000000015EA152C40000000B000000000000000000000000000000000000002400000000susemanager-sls/salt/certs/RES6.sls   RedHat6.sls 070701000000320000A1FF0000000000000000000000015EA152C40000000B000000000000000000000000000000000000002400000000susemanager-sls/salt/certs/RES7.sls   RedHat7.sls 070701000000330000A1FF0000000000000000000000015EA152C40000000B000000000000000000000000000000000000002400000000susemanager-sls/salt/certs/RES8.sls   RedHat7.sls 07070100000034000081B40000000000000000000000015EA152C40000021D000000000000000000000000000000000000002700000000susemanager-sls/salt/certs/RedHat6.sls    enable_ca_store:
  cmd.run:
    - name: /usr/bin/update-ca-trust enable
    - runas: root
    - unless: "/usr/bin/update-ca-trust check | grep \"PEM/JAVA Status: ENABLED\""

/etc/pki/ca-trust/source/anchors/RHN-ORG-TRUSTED-SSL-CERT:
  file.managed:
    - source:
      - salt://certs/RHN-ORG-TRUSTED-SSL-CERT
    - require:
      - cmd: enable_ca_store

update-ca-certificates:
  cmd.run:
    - name: /usr/bin/update-ca-trust extract
    - runas: root
    - onchanges:
      - file: /etc/pki/ca-trust/source/anchors/RHN-ORG-TRUSTED-SSL-CERT
   07070100000035000081B40000000000000000000000015EA152C400000143000000000000000000000000000000000000002700000000susemanager-sls/salt/certs/RedHat7.sls    /etc/pki/ca-trust/source/anchors/RHN-ORG-TRUSTED-SSL-CERT:
  file.managed:
    - source:
      - salt://certs/RHN-ORG-TRUSTED-SSL-CERT

update-ca-certificates:
  cmd.run:
    - name: /usr/bin/update-ca-trust extract
    - runas: root
    - onchanges:
      - file: /etc/pki/ca-trust/source/anchors/RHN-ORG-TRUSTED-SSL-CERT
 070701000000360000A1FF0000000000000000000000015EA152C40000000B000000000000000000000000000000000000002700000000susemanager-sls/salt/certs/RedHat8.sls    RedHat7.sls 070701000000370000A1FF0000000000000000000000015EA152C40000000A000000000000000000000000000000000000002600000000susemanager-sls/salt/certs/SLED15.sls SLES12.sls  07070100000038000081B40000000000000000000000015EA152C400000181000000000000000000000000000000000000002600000000susemanager-sls/salt/certs/SLES11.sls /etc/ssl/certs/RHN-ORG-TRUSTED-SSL-CERT.pem:
  file.managed:
    - source:
      - salt://certs/RHN-ORG-TRUSTED-SSL-CERT

salt://certs/update-multi-cert.sh:
  cmd.wait_script:
    - runas: root
    - watch:
        - file: /etc/ssl/certs/RHN-ORG-TRUSTED-SSL-CERT.pem

c_rehash:
  cmd.run:
    - name: /usr/bin/c_rehash
    - runas: root
    - onchanges:
      - file: /etc/ssl/certs/*
   07070100000039000081B40000000000000000000000015EA152C40000012F000000000000000000000000000000000000002600000000susemanager-sls/salt/certs/SLES12.sls /etc/pki/trust/anchors/RHN-ORG-TRUSTED-SSL-CERT:
  file.managed:
    - source:
      - salt://certs/RHN-ORG-TRUSTED-SSL-CERT

update-ca-certificates:
  cmd.run:
    - name: /usr/sbin/update-ca-certificates
    - runas: root
    - onchanges:
      - file: /etc/pki/trust/anchors/RHN-ORG-TRUSTED-SSL-CERT
 0707010000003A0000A1FF0000000000000000000000015EA152C40000000A000000000000000000000000000000000000002600000000susemanager-sls/salt/certs/SLES15.sls SLES12.sls  0707010000003B0000A1FF0000000000000000000000015EA152C40000000A000000000000000000000000000000000000002A00000000susemanager-sls/salt/certs/SLES_SAP12.sls SLES12.sls  0707010000003C0000A1FF0000000000000000000000015EA152C40000000A000000000000000000000000000000000000002600000000susemanager-sls/salt/certs/SUSE12.sls SLES12.sls  0707010000003D0000A1FF0000000000000000000000015EA152C40000000A000000000000000000000000000000000000002600000000susemanager-sls/salt/certs/SUSE15.sls SLES12.sls  0707010000003E0000A1FF0000000000000000000000015EA152C40000000A000000000000000000000000000000000000002A00000000susemanager-sls/salt/certs/Tumbleweed.sls SLES12.sls  0707010000003F0000A1FF0000000000000000000000015EA152C40000000B000000000000000000000000000000000000002800000000susemanager-sls/salt/certs/Ubuntu16.sls   Debian9.sls 070701000000400000A1FF0000000000000000000000015EA152C40000000B000000000000000000000000000000000000002800000000susemanager-sls/salt/certs/Ubuntu18.sls   Debian9.sls 07070100000041000081B40000000000000000000000015EA152C40000037D000000000000000000000000000000000000002400000000susemanager-sls/salt/certs/init.sls   {% macro includesls(osfullname, osrelease) -%}
{% include 'certs/{0}.sls'.format(osfullname + osrelease.replace('.', '_')) ignore missing -%}
{%- endmacro %}
{% if grains['os_family'] == 'Suse' %}
{% if grains['osfullname'] == 'openSUSE Tumbleweed' %}
{% set sls = includesls('Tumbleweed', '') -%}
{% else -%}
{% set sls = includesls(grains['osfullname'], grains['osrelease']) -%}
{% endif -%}
{% if sls|trim != "" -%}
{{ sls }}
{% else -%}
{{ includesls(grains['osfullname'], grains['osrelease_info']|first|string) }}
{% endif -%}
{% elif grains['os_family'] == 'RedHat' %}
{% set sls = includesls(grains['os'], grains['osrelease']) -%}
{% if sls|trim != "" -%}
{{ sls }}
{% else -%}
{{ includesls(grains['os'], grains['osrelease_info']|first|string) }}
{% endif -%}
{% elif grains['os_family'] == 'Debian' %}
{{ includesls(grains['os'], grains['osrelease_info']|first|string) }}
{% endif %}
   07070100000042000081B40000000000000000000000015EA152C40000018B000000000000000000000000000000000000003000000000susemanager-sls/salt/certs/update-multi-cert.sh   CERT_DIR=/etc/ssl/certs
CERT_FILE=RHN-ORG-TRUSTED-SSL-CERT
TRUST_DIR=/etc/ssl/certs
rm -f $TRUST_DIR/${CERT_FILE}-*.pem
if [ -f $CERT_DIR/${CERT_FILE}.pem ]; then
    if [ $(grep -- "-----BEGIN CERTIFICATE-----" $CERT_DIR/${CERT_FILE}.pem | wc -l) -gt 1 ]; then
        csplit -b "%02d.pem" -f $TRUST_DIR/${CERT_FILE}- $CERT_DIR/${CERT_FILE}.pem '/-----BEGIN CERTIFICATE-----/' '{*}'
    fi
fi

 07070100000043000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001E00000000susemanager-sls/salt/channels 07070100000044000081B40000000000000000000000015EA152C400000519000000000000000000000000000000000000002C00000000susemanager-sls/salt/channels/channels.repo   # Channels managed by SUSE Manager
# Do not edit this file, changes will be overwritten
#
{% for chan, args in pillar.get(pillar.get('_mgr_channels_items_name', 'channels'), {}).items() %}
{%- set protocol = salt['pillar.get']('pkg_download_point_protocol', 'https')%}
{%- set hostname = salt['pillar.get']('pkg_download_point_host', args['host'])%}
{%- set port = salt['pillar.get']('pkg_download_point_port', args.get('port', 443))%}
{%- if grains['os_family'] == 'Debian' %}
deb {{ '[trusted=yes]' if not pillar.get('mgr_metadata_signing_enabled', false) else '[signed-by=/usr/share/keyrings/mgr-archive-keyring.gpg]' }} https://{{ args['token'] }}@{{ args['host'] }}:{{ args.get('port', 443)}}/rhn/manager/download {{ chan }} main
{%- else %}
[{{ args['alias'] }}]
name={{ args['name'] }}
enabled={{ args['enabled'] }}
autorefresh={{ args['autorefresh'] }}
{%- if grains['os_family'] == 'RedHat' %}
baseurl={{protocol}}://{{hostname}}:{{port}}/rhn/manager/download/{{ chan }}
susemanager_token={{ args['token'] }}
{%- else %}
baseurl={{protocol}}://{{hostname}}:{{port}}/rhn/manager/download/{{ chan }}?{{ args['token'] }}
{%- endif %}
type={{ args['type'] }}
gpgcheck={{ args['gpgcheck'] }}
repo_gpgcheck={{ args['repo_gpgcheck'] }}
pkg_gpgcheck={{ args['pkg_gpgcheck'] }}
{%- endif %}

{% endfor %}
   07070100000045000081B40000000000000000000000015EA152C4000000E3000000000000000000000000000000000000003000000000susemanager-sls/salt/channels/debiankeyring.sls   {%- if pillar.get('mgr_metadata_signing_enabled', false) %}
mgr_debian_repo_keyring:
  file.managed:
    - name: /usr/share/keyrings/mgr-archive-keyring.gpg
    - source: salt://gpg/mgr-keyring.gpg
    - mode: 644
{%- endif %}
 07070100000046000081B40000000000000000000000015EA152C400000499000000000000000000000000000000000000003400000000susemanager-sls/salt/channels/disablelocalrepos.sls   # Disable all local repos matching or not matching the 'match_str'
# Default arguments: everything except *susemanager:*
{% if not repos_disabled is defined %}
{% set repos_disabled = {'match_str': 'susemanager:', 'matching': false} %}
{% endif %}
{% do repos_disabled.update({'count': 0}) %}

{% set repos = salt['pkg.list_repos']() %}
{% for alias, data in repos.items() %}
{% if grains['os_family'] == 'Debian' %}
{% for entry in data %}
{% if (repos_disabled.match_str in entry['file'])|string == repos_disabled.matching|string and entry.get('enabled', True) %} 
disable_repo_{{ repos_disabled.count }}:
  module.run:
    - name: pkg.mod_repo
    - repo: {{ "'" ~ entry.line ~ "'" }}
    - kwargs:
        disabled: True
{% do repos_disabled.update({'count': repos_disabled.count + 1}) %}
{% endif %}
{% endfor %}
{% else %}
{% if (repos_disabled.match_str in alias)|string == repos_disabled.matching|string and data.get('enabled', True) %}
disable_repo_{{ alias }}:
  module.run:
    - name: pkg.mod_repo
    - repo: {{ alias }}
    - kwargs:
        enabled: False
{% do repos_disabled.update({'count': repos_disabled.count + 1}) %}
{% endif %}
{% endif %}
{% endfor %}

   07070100000047000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000003500000000susemanager-sls/salt/channels/dnf-susemanager-plugin  07070100000048000081B40000000000000000000000015EA152C400000011000000000000000000000000000000000000004C00000000susemanager-sls/salt/channels/dnf-susemanager-plugin/susemanagerplugin.conf   [main]
enabled=1
   07070100000049000081B40000000000000000000000015EA152C4000001E2000000000000000000000000000000000000004A00000000susemanager-sls/salt/channels/dnf-susemanager-plugin/susemanagerplugin.py import dnf

class Susemanager(dnf.Plugin):
    """DNF plugin adding the SUSE Manager auth token as an HTTP header.

    For every repository whose id matches "susemanager:*" the plugin reads
    the 'susemanager_token' option from the repo configuration and attaches
    it to outgoing requests via the 'X-Mgr-Auth' header.
    """

    name = 'susemanager'

    def __init__(self, base, cli):
        super(Susemanager, self).__init__(base, cli)
        base.read_all_repos()
        for repo in base.repos.get_matching("susemanager:*"):
            try:
                susemanager_token = repo.cfg.getValue(section=repo.id, key="susemanager_token")
                repo.set_http_headers(["X-Mgr-Auth: %s" % susemanager_token])
            except Exception:
                # Best effort: a repo without a token (or an incompatible
                # libdnf config API) is simply skipped; never break the DNF
                # run.  Was a bare 'except:', which would also swallow
                # SystemExit/KeyboardInterrupt — narrowed to Exception.
                pass
  0707010000004A000081B40000000000000000000000015EA152C400000B17000000000000000000000000000000000000002700000000susemanager-sls/salt/channels/init.sls    {%- if grains['os_family'] == 'RedHat' %}

{%- set yum_version = salt['pkg.version']("yum") %}
{%- set is_yum = yum_version and salt['pkg.version_cmp'](yum_version, "4") < 0 %}
{%- set is_dnf = salt['pkg.version']("dnf") %}

{%- if is_dnf %}
mgrchannels_susemanagerplugin_dnf:
  file.managed:
    - name: /usr/lib/python{{ grains['pythonversion'][0] }}.{{ grains['pythonversion'][1] }}/site-packages/dnf-plugins/susemanagerplugin.py
    - source:
      - salt://channels/dnf-susemanager-plugin/susemanagerplugin.py
    - user: root
    - group: root
    - mode: 644

mgrchannels_susemanagerplugin_conf_dnf:
  file.managed:
    - name: /etc/dnf/plugins/susemanagerplugin.conf
    - source:
      - salt://channels/dnf-susemanager-plugin/susemanagerplugin.conf
    - user: root
    - group: root
    - mode: 644
{%- endif %}

{%- if is_yum %}
mgrchannels_susemanagerplugin_yum:
  file.managed:
    - name: /usr/share/yum-plugins/susemanagerplugin.py
    - source:
      - salt://channels/yum-susemanager-plugin/susemanagerplugin.py
    - user: root
    - group: root
    - mode: 644

mgrchannels_susemanagerplugin_conf_yum:
  file.managed:
    - name: /etc/yum/pluginconf.d/susemanagerplugin.conf
    - source:
      - salt://channels/yum-susemanager-plugin/susemanagerplugin.conf
    - user: root
    - group: root
    - mode: 644
{%- endif %}
{%- endif %}

mgrchannels_repo:
  file.managed:
{%- if grains['os_family'] == 'Suse' %}
    - name: "/etc/zypp/repos.d/susemanager:channels.repo"
{%- elif grains['os_family'] == 'RedHat' %}
    - name: "/etc/yum.repos.d/susemanager:channels.repo"
{%- elif grains['os_family'] == 'Debian' %}
    - name: "/etc/apt/sources.list.d/susemanager:channels.list"
{%- endif %}
    - source:
      - salt://channels/channels.repo
    - template: jinja
    - user: root
    - group: root
    - mode: 644
{%- if grains['os_family'] == 'RedHat' %}
    - require:
{%- if is_dnf %}
       - file: mgrchannels_susemanagerplugin_dnf
       - file: mgrchannels_susemanagerplugin_conf_dnf
{%- endif %}
{%- if is_yum %}
       - file: mgrchannels_susemanagerplugin_yum
       - file: mgrchannels_susemanagerplugin_conf_yum
{%- endif %}
{%- endif %}

{%- if grains['os_family'] == 'RedHat' %}
{#- flush the package manager metadata cache whenever the channel repo file
    changed; skipped when no repos are configured at all (repolist: 0) #}
{%- if is_dnf %}
mgrchannels_dnf_clean_all:
  cmd.run:
    - name: /usr/bin/dnf clean all
    - runas: root
    - onchanges:
       - file: "/etc/yum.repos.d/susemanager:channels.repo"
    -  unless: "/usr/bin/dnf repolist | grep \"repolist: 0$\""
{%- endif %}
{%- if is_yum %}
mgrchannels_yum_clean_all:
  cmd.run:
    - name: /usr/bin/yum clean all
    - runas: root
    - onchanges: 
       - file: "/etc/yum.repos.d/susemanager:channels.repo"
    -  unless: "/usr/bin/yum repolist | grep \"repolist: 0$\""
{%- endif %}
{%- elif grains['os_family'] == 'Debian' %}
{#- Debian additionally needs the SUSE Manager GPG keyring deployed #}
{%- include 'channels/debiankeyring.sls' %}
{%- endif %}
 0707010000004B000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000003500000000susemanager-sls/salt/channels/yum-susemanager-plugin  0707010000004C000081B40000000000000000000000015EA152C400000011000000000000000000000000000000000000004C00000000susemanager-sls/salt/channels/yum-susemanager-plugin/susemanagerplugin.conf   [main]
enabled=1
   0707010000004D000081B40000000000000000000000015EA152C4000001B2000000000000000000000000000000000000004A00000000susemanager-sls/salt/channels/yum-susemanager-plugin/susemanagerplugin.py from yum.plugins import TYPE_CORE
from yum import config

requires_api_version = '2.5'
plugin_type = TYPE_CORE


def config_hook(conduit):
    """Register the custom 'susemanager_token' option on yum repo configs."""
    setattr(config.RepoConf, 'susemanager_token', config.Option())


def init_hook(conduit):
    """Attach the SUSE Manager auth token header to every enabled repo.

    Repos that carry a non-empty 'susemanager_token' option get an
    'X-Mgr-Auth' HTTP header; all other repos are left untouched.
    """
    for enabled_repo in conduit.getRepos().listEnabled():
        token = getattr(enabled_repo, 'susemanager_token', None)
        if not token:
            continue
        enabled_repo.http_headers['X-Mgr-Auth'] = token
  0707010000004E000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000002400000000susemanager-sls/salt/cleanup_minion   0707010000004F000081B40000000000000000000000015EA152C40000023D000000000000000000000000000000000000002D00000000susemanager-sls/salt/cleanup_minion/init.sls  {%- if grains['os_family'] == 'RedHat' %}
{#- remove the SUSE Manager channel repo definition for the detected OS #}
mgrchannels_repo_clean_all:
  file.absent:
    - name: /etc/yum.repos.d/susemanager:channels.repo
{%- endif %}
{%- if grains['os_family'] == 'Suse' %}
mgrchannels_repo_clean_all:
  file.absent:
    - name: /etc/zypp/repos.d/susemanager:channels.repo
{%- endif %}
{%- if grains['os_family'] == 'Debian' %}
{#- Debian: drop both the APT source and the deployed GPG keyring #}
mgrchannels_repo_clean_channels:
  file.absent:
    - name: /etc/apt/sources.list.d/susemanager:channels.list
mgrchannels_repo_clean_keyring:
  file.absent:
    - name: /usr/share/keyrings/mgr-archive-keyring.gpg
{%- endif %}
   07070100000050000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000002800000000susemanager-sls/salt/cleanup_ssh_minion   07070100000051000081B40000000000000000000000015EA152C4000005D4000000000000000000000000000000000000003100000000susemanager-sls/salt/cleanup_ssh_minion/init.sls  include:
    - cleanup_minion

{% if salt['pillar.get']('contact_method') == 'ssh-push-tunnel' %}
# remove server to localhost aliasing from /etc/hosts
mgr_remove_mgr_server_localhost_alias:
  host.absent:
    - ip:
      - 127.0.0.1
    - names:
      - {{ salt['pillar.get']('mgr_server') }}
{%- endif %}

# remove server ssh authorization
mgr_remove_mgr_ssh_identity:
  ssh_auth.absent:
    - user: {{ salt['pillar.get']('mgr_sudo_user') or 'root' }}
    - source: salt://salt_ssh/mgr_ssh_id.pub

{%- if salt['pillar.get']('proxy_pub_key') and salt['pillar.get']('contact_method') == 'ssh-push-tunnel' %}
# remove proxy ssh authorization (if any)
mgr_remove_proxy_ssh_identity:
  ssh_auth.absent:
    - user: {{ salt['pillar.get']('mgr_sudo_user') or 'root' }}
    - source: salt://salt_ssh/{{ salt['pillar.get']('proxy_pub_key') }}
{%- endif %}

{%- if salt['pillar.get']('mgr_sudo_user') and salt['pillar.get']('mgr_sudo_user') != 'root' %}
{%- set home = '/home/' ~ salt['pillar.get']('mgr_sudo_user') %}
{%- else %}
{%- set home = '/root' %}
{%- endif %}

# remove own key authorization
mgr_no_own_key_authorized:
  ssh_auth.absent:
    - user: {{ salt['pillar.get']('mgr_sudo_user') or 'root' }}
    - source: {{ home }}/.ssh/mgr_own_id.pub

# remove own keys
mgr_remove_own_ssh_pub_key:
  file.absent:
    - name: {{ home }}/.ssh/mgr_own_id.pub
    - require:
      - ssh_auth: mgr_no_own_key_authorized

mgr_remove_own_ssh_key:
  file.absent:
    - name: {{ home }}/.ssh/mgr_own_id
07070100000052000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000002300000000susemanager-sls/salt/configuration    07070100000053000081B40000000000000000000000015EA152C4000002D5000000000000000000000000000000000000003400000000susemanager-sls/salt/configuration/deploy_files.sls   {% if pillar.get('param_files', []) %}
{#- one state per entry in pillar 'param_files'; loop.index keeps IDs unique #}
{%- for file in pillar.get('param_files') %}

file_deploy_{{ loop.index }}:
{# pick the state function matching the entry's 'type' field #}
{% if file.type == 'file' %}
    file.managed:
{% elif file.type == 'directory' %}
    file.directory:
{% elif file.type == 'symlink' %}
    file.symlink:
{% endif %}
    -   name: {{ file.name }}
    -   makedirs: True
{# per-type arguments: files have a source, symlinks only a target #}
{% if file.type == 'file' %}
    -   source: {{ file.source }}
    -   user: {{ file.user }}
    -   group: {{ file.group }}
    -   mode: {{ file.mode }}
{% elif file.type == 'directory' %}
    -   user: {{ file.user }}
    -   group: {{ file.group }}
    -   mode: {{ file.mode }}
{% elif file.type == 'symlink' %}
    -   target: {{ file.target }}
{% endif %}
{%- endfor %}
{% endif %}

   07070100000054000081B40000000000000000000000015EA152C4000002D5000000000000000000000000000000000000003200000000susemanager-sls/salt/configuration/diff_files.sls {% if pillar.get('param_files', []) %}
{%- for file in pillar.get('param_files') %}

file_deploy_{{ loop.index }}:
{% if file.type == 'file' %}
    file.managed:
{% elif file.type == 'directory' %}
    file.directory:
{% elif file.type == 'symlink' %}
    file.symlink:
{% endif %}
    -   name: {{ file.name }}
    -   makedirs: True
{% if file.type == 'file' %}
    -   source: {{ file.source }}
    -   user: {{ file.user }}
    -   group: {{ file.group }}
    -   mode: {{ file.mode }}
{% elif file.type == 'directory' %}
    -   user: {{ file.user }}
    -   group: {{ file.group }}
    -   mode: {{ file.mode }}
{% elif file.type == 'symlink' %}
    -   target: {{ file.target }}
{% endif %}
{%- endfor %}
{% endif %}

   07070100000055000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001C00000000susemanager-sls/salt/custom   07070100000056000081B40000000000000000000000015EA152C400000036000000000000000000000000000000000000002500000000susemanager-sls/salt/custom/init.sls  include:
  - custom.custom_{{ grains['machine_id'] }}
  07070100000057000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000002300000000susemanager-sls/salt/custom_groups    07070100000058000081B40000000000000000000000015EA152C400000091000000000000000000000000000000000000002C00000000susemanager-sls/salt/custom_groups/init.sls   {% if pillar.get('group_ids', []) -%}
{# pull in one generated custom state per system-group id from pillar #}
include:
{% for gid in pillar.get('group_ids', []) -%}
  - custom.group_{{ gid }}
{% endfor %}
{% endif %}
   07070100000059000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000002000000000susemanager-sls/salt/custom_org   0707010000005A000081B40000000000000000000000015EA152C400000060000000000000000000000000000000000000002900000000susemanager-sls/salt/custom_org/init.sls  {% if pillar['org_id'] is defined %}
include:
  - custom.org_{{ pillar['org_id'] }}
{% endif %}
0707010000005B000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000002100000000susemanager-sls/salt/distupgrade  0707010000005C000081B40000000000000000000000015EA152C4000001C2000000000000000000000000000000000000002A00000000susemanager-sls/salt/distupgrade/init.sls {% if grains['os_family'] == 'Suse' %}
{#- service pack migration: dist upgrade driven by pillar data #}
spmigration:
  module.run:
    - name: pkg.upgrade
    - dist_upgrade: True
    - dryrun: {{ salt['pillar.get']('susemanager:distupgrade:dryrun', False) }}
{# SLE 12+ passes novendorchange; older releases are restricted to the
   migration target channels instead #}
{% if grains['osrelease_info'][0] >= 12 %}
    - novendorchange: True
{% else %}
    - fromrepo: {{ salt['pillar.get']('susemanager:distupgrade:channels', []) }}
{% endif %}
    -   require:
        - file: mgrchannels*
{% endif %}

include:
  - channels
  0707010000005D000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001E00000000susemanager-sls/salt/hardware 0707010000005E000081B40000000000000000000000015EA152C400000536000000000000000000000000000000000000003000000000susemanager-sls/salt/hardware/profileupdate.sls   
{%- if grains['cpuarch'] in ['i386', 'i486', 'i586', 'i686', 'x86_64', 'aarch64'] %}
{# dmidecode is needed to read the SMBIOS records below;
   SLES 11.3/11.4 ship it in the 'pmtools' package #}
mgr_install_dmidecode:
  pkg.installed:
{%- if grains['os_family'] == 'Suse' and grains['osrelease'] in ['11.3', '11.4'] %}
    - name: pmtools
{%- else %}
    - name: dmidecode
{%- endif %}
{%- endif %}

{# collect the raw data the server turns into a hardware profile #}
grains:
  module.run:
    - name: grains.items
cpuinfo:
  module.run:
    - name: status.cpuinfo
udev:
  module.run:
    - name: udev.exportdb
network-interfaces:
  module.run:
    - name: network.interfaces
network-ips:
  module.run:
    - name: sumautil.primary_ips
network-modules:
  module.run:
    - name: sumautil.get_net_modules

{# SMBIOS record types: 0=BIOS, 1=system, 2=baseboard, 3=chassis #}
{% if grains['cpuarch'] in ['i386', 'i486', 'i586', 'i686', 'x86_64'] %}
smbios-records-bios:
  module.run:
    - name: smbios.records
    - rec_type: 0
    - clean: False
smbios-records-system:
  module.run:
    - name: smbios.records
    - rec_type: 1
    - clean: False
smbios-records-baseboard:
  module.run:
    - name: smbios.records
    - rec_type: 2
    - clean: False
smbios-records-chassis:
  module.run:
    - name: smbios.records
    - rec_type: 3
    - clean: False
{% elif grains['cpuarch'] in ['s390', 's390x'] %}
mainframe-sysinfo:
  module.run:
    - name: mainframesysinfo.read_values
{% endif %}
{# network.fqdns is only present on newer Salt versions #}
{% if 'network.fqdns' in salt %}
fqdns:
  module.run:
    - name: network.fqdns
{% endif%}
  0707010000005F000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001C00000000susemanager-sls/salt/images   07070100000060000081B40000000000000000000000015EA152C400000606000000000000000000000000000000000000002700000000susemanager-sls/salt/images/docker.sls    {% if grains['saltversioninfo'][0] >= 2018 %}

mgr_registries_login:
  module.run:
    - name: docker.login
    - registries: {{ pillar.get('docker-registries', {}).keys() | list }}

mgr_buildimage:
  module.run:
    - name: docker.build
{%- if pillar.get('imagerepopath') is defined %}
    - repository: "{{ pillar.get('imagerepopath') }}"
    - tag: "{{ pillar.get('imagetag', 'latest') }}"
{%- else %}
    - repository: "{{ pillar.get('imagename') }}"
    - tag: "{{ pillar.get('imagename').rsplit(':', 1)[1] }}"
{%- endif %}
    - path: "{{ pillar.get('builddir') }}"
    - buildargs:
        repo: "{{ pillar.get('repo') }}"
        cert: "{{ pillar.get('cert') }}"
    - require:
      - module: mgr_registries_login

mgr_pushimage:
  module.run:
    - name: docker.push
    - image: "{{ pillar.get('imagename') }}"
    - require:
      - module: mgr_buildimage
      - module: mgr_registries_login

{% else %}

mgr_registries_login:
  module.run:
    - name: dockerng.login
    - registries: {{ pillar.get('docker-registries', {}).keys() }}

mgr_buildimage:
  module.run:
    - name: dockerng.build
    - image: "{{ pillar.get('imagename') }}"
    - path: "{{ pillar.get('builddir') }}"
    - buildargs:
        repo: "{{ pillar.get('repo') }}"
        cert: "{{ pillar.get('cert') }}"
    - require:
      - module: mgr_registries_login

mgr_pushimage:
  module.run:
    - name: dockerng.push
    - image: "{{ pillar.get('imagename') }}"
    - require:
      - module: mgr_buildimage
      - module: mgr_registries_login

{% endif %}
  07070100000061000081B40000000000000000000000015EA152C40000160B000000000000000000000000000000000000003100000000susemanager-sls/salt/images/kiwi-image-build.sls  # SUSE Manager for Retail build trigger
#

{%- set source     = pillar.get('source') %}

{%- set kiwi_dir   = '/var/lib/Kiwi/' %}
{%- set common_repo = kiwi_dir + 'repo' %}

{%- set root_dir   = kiwi_dir + pillar.get('build_id') %}
{%- set source_dir = root_dir + '/source' %}
{%- set chroot_dir = root_dir + '/chroot/' %}
{%- set dest_dir   = root_dir + '/images.build' %}
{%- set bundle_dir = root_dir + '/images/' %}
# cache dir is used only with Kiwi-ng
{%- set cache_dir  = root_dir + '/cache/' %}
{%- set bundle_id  = pillar.get('build_id') %}
{%- set activation_key = pillar.get('activation_key') %}

# on SLES11 and SLES12 use legacy Kiwi, use Kiwi NG elsewhere
{%- set use_kiwi_ng = not (salt['grains.get']('osfullname') == 'SLES' and salt['grains.get']('osmajorrelease')|int() < 15) %}

mgr_buildimage_prepare_source:
  file.directory:
    - name: {{ root_dir }}
    - clean: True
  module.run:
    - name: kiwi_source.prepare_source
    - source: {{ source }}
    - root: {{ root_dir }}

mgr_buildimage_prepare_activation_key_in_source:
  file.managed:
    - name: {{ source_dir }}/root/etc/salt/minion.d/kiwi_activation_key.conf
    - makedirs: True
    - contents: |
        grains:
          susemanager:
            activation_key: {{ activation_key }}

{%- if use_kiwi_ng %}
# KIWI NG
#
{%- set kiwi = 'kiwi-ng' %}

{%- set profile_opt = '' %}
{%- if pillar.get('kiwi_profile') %}
{%-   set profile_opt = '--profile ' + pillar.get('kiwi_profile') %}
{%- endif %}

{%- macro kiwi_params() -%}
  --ignore-repos-used-for-build --add-repo file:{{ common_repo }},rpm-dir,common_repo,90,false,false --add-bootstrap-package rhn-org-trusted-ssl-cert-osimage {{ ' ' }}
{%- for repo in pillar.get('kiwi_repositories') -%}
  --add-repo {{ repo }},rpm-md,key_repo{{ loop.index }},90,false,false {{ ' ' }}
{%- endfor -%}
{%- endmacro %}

mgr_buildimage_kiwi_prepare:
  cmd.run:
    - name: "{{ kiwi }} --logfile={{ root_dir }}/prepare.log --shared-cache-dir={{ cache_dir }} {{ profile_opt }} system prepare --description {{ source_dir }} --root {{ chroot_dir }} {{ kiwi_params() }}"
    - require:
      - module: mgr_buildimage_prepare_source
      - file: mgr_buildimage_prepare_activation_key_in_source

mgr_buildimage_kiwi_create:
  cmd.run:
    - name: "{{ kiwi }} --logfile={{ root_dir }}/create.log --shared-cache-dir={{ cache_dir }} {{ profile_opt }} system create --root {{ chroot_dir }} --target-dir  {{ dest_dir }}"
    - require:
      - cmd: mgr_buildimage_kiwi_prepare

mgr_buildimage_kiwi_bundle:
  cmd.run:
    - name: "{{ kiwi }} result bundle --target-dir {{ dest_dir }} --id {{ bundle_id }} --bundle-dir {{ bundle_dir }}"
    - require:
      - cmd: mgr_buildimage_kiwi_create


{%- else %}
# KIWI Legacy
#

{%- set kiwi_help = salt['cmd.run']('kiwi --help') %}
{%- set have_bundle_build = kiwi_help.find('--bundle-build') > 0 %}

# i586 build on x86_64 host must be called with linux32
# let's consider the build i586 if there is no x86_64 repo specified
{%- set kiwi = 'linux32 kiwi' if (pillar.get('kiwi_repositories')|join(' ')).find('x86_64') == -1 and grains.get('osarch') == 'x86_64' else 'kiwi' %}

# in SLES11 Kiwi the --add-repotype is required
{%- macro kiwi_params() -%}
  --add-repo {{ common_repo }} --add-repotype rpm-dir --add-repoalias common_repo {{ ' ' }}
{%- for repo in pillar.get('kiwi_repositories') -%}
  --add-repo {{ repo }} --add-repotype rpm-md --add-repoalias key_repo{{ loop.index }} {{ ' ' }}
{%- endfor -%}
{%- endmacro %}

# old Kiwi can't change cache location, so we have to clear cache before each build
mgr_kiwi_clear_cache:
  file.directory:
    - name: /var/cache/kiwi/
    - makedirs: True
    - clean: True

mgr_buildimage_kiwi_prepare:
  cmd.run:
    - name: "{{ kiwi }} --nocolor --force-new-root --prepare {{ source_dir }} --root {{ chroot_dir }} {{ kiwi_params() }}"
    - require:
      - module: mgr_buildimage_prepare_source
      - file: mgr_buildimage_prepare_activation_key_in_source

mgr_buildimage_kiwi_create:
  cmd.run:
    - name: "{{ kiwi }} --nocolor --yes --create {{ chroot_dir }} --dest {{ dest_dir }} {{ kiwi_params() }}"
    - require:
      - cmd: mgr_buildimage_kiwi_prepare

{%- if have_bundle_build %}
mgr_buildimage_kiwi_bundle:
  cmd.run:
    - name: "{{ kiwi }} --nocolor --yes --bundle-build {{ dest_dir }} --bundle-id {{ bundle_id }} --destdir {{ bundle_dir }}"
    - require:
      - cmd: mgr_buildimage_kiwi_create

{%- else %}

# SLE11 Kiwi does not have --bundle-build option, we have to create the bundle tarball ourselves:

mgr_buildimage_kiwi_bundle_dir:
  file.directory:
    - name: {{ bundle_dir }}
    - require:
      - cmd: mgr_buildimage_kiwi_create

mgr_buildimage_kiwi_bundle_tarball:
  cmd.run:
    - name: "cd '{{ dest_dir }}' && tar czf '{{ bundle_dir }}'`basename *.packages .packages`-{{ bundle_id }}.tgz --no-recursion `find . -maxdepth 1 -type f`"
    - require:
      - file: mgr_buildimage_kiwi_bundle_dir

mgr_buildimage_kiwi_bundle:
  cmd.run:
    - name: "cd '{{ bundle_dir }}' && sha256sum *.tgz > `echo *.tgz`.sha256"
    - require:
      - cmd: mgr_buildimage_kiwi_bundle_tarball

{%- endif %}

{%- endif %}


{%- if pillar.get('use_salt_transport') %}
mgr_buildimage_kiwi_collect_image:
  module.run:
    - name: cp.push_dir
    - path: {{ bundle_dir }}
    - require:
      - cmd: mgr_buildimage_kiwi_bundle
{%- endif %}

mgr_buildimage_info:
  module.run:
    - name: kiwi_info.image_details
    - dest: {{ dest_dir }}
    - bundle_dest: {{ bundle_dir }}
    - require:
{%- if pillar.get('use_salt_transport') %}
      - mgr_buildimage_kiwi_collect_image
{%- else %}
      - mgr_buildimage_kiwi_bundle
{%- endif %}
 07070100000062000081B40000000000000000000000015EA152C40000024F000000000000000000000000000000000000003300000000susemanager-sls/salt/images/kiwi-image-inspect.sls    # SUSE Manager for Retail build trigger
#
{%- set root_dir   = '/var/lib/Kiwi/' + pillar.get('build_id') %}
{%- set dest_dir   = root_dir + '/images.build' %}
{%- set bundle_dir = root_dir + '/images/' %}

# the goal is to collect all information required for
# saltboot image pillar

# inspect the built image/bundle and return its details to the master
mgr_inspect_kiwi_image:
  module.run:
    - name: kiwi_info.inspect_image
    - dest: {{ dest_dir }}
    - bundle_dest: {{ bundle_dir }}

# drop the whole per-build working directory once inspection succeeded
# (the 'require' makes sure nothing is deleted if inspection failed)
mgr_kiwi_cleanup:
  cmd.run:
    - name: "rm -rf '{{ root_dir }}'"
    - require:
      - module: mgr_inspect_kiwi_image
 07070100000063000081B40000000000000000000000015EA152C400000816000000000000000000000000000000000000002E00000000susemanager-sls/salt/images/profileupdate.sls {% set container_name = salt['pillar.get']('mgr_container_name', 'mgr_container_' ~ range(1, 10000) | random )  %}

{% if grains['saltversioninfo'][0] >= 2018 %}

mgr_registries_login_inspect:
  module.run:
    - name: docker.login
    - registries: {{ pillar.get('docker-registries', {}).keys() | list }}

mgr_image_profileupdate:
  module.run:
    - name: docker.sls_build
    - repository: "{{ container_name }}"
    - base: "{{ pillar.get('imagename') }}"
    - mods: packages.profileupdate
    - dryrun: True
    - kwargs:
        entrypoint: ""
    - require:
      - module: mgr_registries_login_inspect

mgr_image_inspect:
  module.run:
    - name: docker.inspect_image
    - m_name: "{{ pillar.get('imagename') }}"
    - require:
      - module: mgr_registries_login_inspect

mgr_container_remove:
  module.run:
    - name: docker.rm
    - args: [ "{{ container_name }}" ]
    - force: False
    - onlyif:
      - docker ps -a | grep "{{ container_name }}" >/dev/null

mgr_image_remove:
  module.run:
    - name: docker.rmi
    - m_names:
      - "{{ pillar.get('imagename') }}"
    - force: False

{% else %}

mgr_registries_login_inspect:
  module.run:
    - name: dockerng.login
    - registries: {{ pillar.get('docker-registries', {}).keys() }}

mgr_image_profileupdate:
  module.run:
    - name: dockerng.sls_build
    - m_name: "{{ container_name }}"
    - base: "{{ pillar.get('imagename') }}"
    - mods: packages.profileupdate
    - dryrun: True
    - kwargs:
        entrypoint: ""
    - require:
      - module: mgr_registries_login_inspect

mgr_image_inspect:
  module.run:
    - name: dockerng.inspect
    - m_name: "{{ pillar.get('imagename') }}"
    - require:
      - module: mgr_registries_login_inspect

mgr_container_remove:
  module.run:
    - name: dockerng.rm
    - args: [ "{{ container_name }}" ]
    - force: False
    - onlyif:
      - docker ps -a | grep "{{ container_name }}" >/dev/null

mgr_image_remove:
  module.run:
    - name: dockerng.rmi
    - m_names:
      - "{{ pillar.get('imagename') }}"
    - force: False

{% endif %}
  07070100000064000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001E00000000susemanager-sls/salt/packages 07070100000065000081B40000000000000000000000015EA152C4000001AA000000000000000000000000000000000000002700000000susemanager-sls/salt/packages/init.sls    {%- if grains['os_family'] == 'Suse' and grains['osmajorrelease']|int > 11 %}
mgr_install_products:
  product.all_installed:
    - refresh: True
    - require:
      - file: mgrchannels_*
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_states
{%- else %}
      - module: sync_states
{%- endif %}
{%- endif %}

include:
  - util.syncstates
  - .packages_{{ grains['machine_id'] }}
  07070100000066000081B40000000000000000000000015EA152C40000014B000000000000000000000000000000000000003000000000susemanager-sls/salt/packages/patchdownload.sls   {% if pillar.get('param_patches', []) %}
pkg_downloaded-patches:
  pkg.patch_downloaded:
    - advisory_ids:
{%- for patch in pillar.get('param_patches', []) %}
      - {{ patch }}
{%- endfor %}
    - require:
      - module: applychannels
{% endif %}

applychannels:
    module.run:
    -  name: state.apply
    -  mods: channels
 07070100000067000081B40000000000000000000000015EA152C400000489000000000000000000000000000000000000002F00000000susemanager-sls/salt/packages/patchinstall.sls    {% if grains.get('saltversioninfo', []) < [2015, 8, 12] %}
{{ salt.test.exception("You are running an old version of salt-minion that does not support patching. Please update salt-minion and try again.") }}
{% endif %}

{% if pillar.get('param_update_stack_patches', []) %}
mgr_update_stack_patches:
  pkg.patch_installed:
    - refresh: true
    - advisory_ids:
{%- for patch in pillar.get('param_update_stack_patches', []) %}
      - {{ patch }}
{%- endfor %}
    - diff_attr: ['epoch', 'version', 'release', 'arch', 'install_date_time_t']
    - require:
        - file: mgrchannels*
{% endif %}

{% if pillar.get('param_regular_patches', []) %}
mgr_regular_patches:
  pkg.patch_installed:
{% if not pillar.get('param_update_stack_patches', []) %}
    - refresh: true
{% endif %}
    - advisory_ids:
{%- for patch in pillar.get('param_regular_patches', []) %}
      - {{ patch }}
{%- endfor %}
    - diff_attr: ['epoch', 'version', 'release', 'arch', 'install_date_time_t']
    - require:
        - file: mgrchannels*
{% if pillar.get('param_update_stack_patches', []) %}
        - pkg: mgr_update_stack_patches
{% endif %}
{% endif %}

include:
  - channels
   07070100000068000081B40000000000000000000000015EA152C400000207000000000000000000000000000000000000002E00000000susemanager-sls/salt/packages/pkgdownload.sls {% if pillar.get('param_pkgs') %}
pkg_downloaded:
  pkg.downloaded:
    - pkgs:
{%- for pkg, arch, version in pillar.get('param_pkgs', []) %}
    {%- if grains['os_family'] == 'Debian' %}
        - {{ pkg }}:{{ arch }}: {{ version }}
    {%- elif grains.get('__suse_reserved_pkg_all_versions_support', False) %}
        - {{ pkg }}.{{ arch }}: {{ version }}
    {%- else %}
        - {{ pkg }}: {{ version }}
    {%- endif %}
{%- endfor %}
    - require:
        - file: mgrchannels*
{% endif %}

include:
  - channels
 07070100000069000081B40000000000000000000000015EA152C4000002F6000000000000000000000000000000000000002D00000000susemanager-sls/salt/packages/pkginstall.sls  {% if pillar.get('param_pkgs') %}
pkg_installed:
  pkg.installed:
    -   refresh: true
{%- if grains['os_family'] == 'Debian' %}
    - skip_verify: {{ not pillar.get('mgr_metadata_signing_enabled', false) }}
{%- endif %}
    -   pkgs:
{%- for pkg, arch, version in pillar.get('param_pkgs', []) %}
    {%- if grains['os_family'] == 'Debian' %}
        - {{ pkg }}:{{ arch }}: {{ version }}
    {%- elif grains.get('__suse_reserved_pkg_all_versions_support', False) %}
        - {{ pkg }}.{{ arch }}: {{ version }}
    {%- else %}
        - {{ pkg }}: {{ version }}
    {%- endif %}

{%- endfor %}
    - diff_attr: ['epoch', 'version', 'release', 'arch', 'install_date_time_t']
    -   require:
        - file: mgrchannels*
{% endif %}

include:
  - channels
  0707010000006A000081B40000000000000000000000015EA152C400000205000000000000000000000000000000000000002C00000000susemanager-sls/salt/packages/pkgremove.sls   {% if pillar.get('param_pkgs') %}
{# remove the pillar-listed packages; the name format depends on platform
   capabilities (Debian uses name:arch, newer SUSE minions name.arch) #}
pkg_removed:
  pkg.removed:
    -   pkgs:
{%- for pkg, arch, version in pillar.get('param_pkgs', []) %}
    {%- if grains['os_family'] == 'Debian' %}
        - {{ pkg }}:{{ arch }}: {{ version }}
    {%- elif grains.get('__suse_reserved_pkg_all_versions_support', False) %}
        - {{ pkg }}.{{ arch }}: {{ version }}
    {%- else %}
        - {{ pkg }}: {{ version }}
    {%- endif %}
{%- endfor %}
    -   require:
        - file: mgrchannels*
{% endif %}

include:
  - channels
   0707010000006B000081B40000000000000000000000015EA152C4000004D0000000000000000000000000000000000000003000000000susemanager-sls/salt/packages/profileupdate.sls   packages:
  module.run:
    - name: pkg.info_installed
    - kwargs: {
          attr: 'arch,epoch,version,release,install_date_time_t',
{%- if grains.get('__suse_reserved_pkg_all_versions_support', False) %}
          errors: report,
          all_versions: true
{%- else %}
          errors: report
{%- endif %}
      }
{% if grains['os_family'] == 'Suse' %}
products:
  module.run:
    - name: pkg.list_products
{% elif grains['os_family'] == 'RedHat' %}
{% include 'packages/redhatproductinfo.sls' %}
{% elif grains['os_family'] == 'Debian' %}
debianrelease:
  cmd.run:
    - name: cat /etc/os-release
    - onlyif: test -f /etc/os-release
{% endif %}

include:
  - util.syncgrains
  - util.syncmodules

grains_update:
  module.run:
    - name: grains.items
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_grains
{%- else %}
      - module: sync_grains
{%- endif %}

{% if not pillar.get('imagename') %}
kernel_live_version:
  module.run:
    - name: sumautil.get_kernel_live_version
    - require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
      - saltutil: sync_modules
{%- else %}
      - module: sync_modules
{%- endif %}
{% endif %}
0707010000006C000081B40000000000000000000000015EA152C400000192000000000000000000000000000000000000003400000000susemanager-sls/salt/packages/redhatproductinfo.sls   {% if grains['os_family'] == 'RedHat' %}
{#- gather RHEL/CentOS product information by dumping the release files;
    each command only runs when its source file actually exists #}
rhelrelease:
  cmd.run:
    - name: cat /etc/redhat-release
    - onlyif: test -f /etc/redhat-release
centosrelease:
  cmd.run:
    - name: cat /etc/centos-release
    - onlyif: test -f /etc/centos-release
{#- detect RES / Expanded Support via the package providing the release #}
respkgquery:
  cmd.run:
    - name: rpm -q --whatprovides 'sles_es-release-server'
    - onlyif: rpm -q --whatprovides 'sles_es-release-server'
{% endif %}  0707010000006D000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000002400000000susemanager-sls/salt/remotecommands   0707010000006E000081B40000000000000000000000015EA152C4000000DC000000000000000000000000000000000000002D00000000susemanager-sls/salt/remotecommands/init.sls  remote_command:
  cmd.script:
    - source: {{ pillar.get('mgr_remote_cmd_script') }}
    - runas: {{ pillar.get('mgr_remote_cmd_runas', 'root') }}
    - timeout: {{ pillar.get('mgr_remote_cmd_timeout') }}
    # TODO GID0707010000006F000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001A00000000susemanager-sls/salt/scap 07070100000070000081B40000000000000000000000015EA152C400000064000000000000000000000000000000000000002300000000susemanager-sls/salt/scap/init.sls    mgr_scap:
  module.run:
    - name: openscap.xccdf
    - params: {{ pillar.get('mgr_scap_params') }}07070100000071000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001E00000000susemanager-sls/salt/services 07070100000072000081B40000000000000000000000015EA152C40000037C000000000000000000000000000000000000002900000000susemanager-sls/salt/services/docker.sls  {% if pillar['addon_group_types'] is defined and 'container_build_host' in pillar['addon_group_types'] %}
mgr_install_docker:
  pkg.installed:
    - pkgs:
      - git-core
      - docker: '>=1.9.0'
{%- if grains['pythonversion'][0] == 3 %}
    {%- if grains['osmajorrelease'] == 12 %}
      - python3-docker-py: '>=1.6.0'
    {%- else %}
      - python3-docker: '>=1.6.0'
    {%- endif %}
{%- else %}
      - python-docker-py: '>=1.6.0'
{%- endif %}
{%- if grains['saltversioninfo'][0] >= 2018 %}
    {%- if grains['osmajorrelease'] == 12 %}
      - python3-salt
    {%- else %}
      - python2-salt
    {%- endif %}
{%- endif %}

mgr_docker_service:
  service.running:
    - name: docker
    - enable: True
    - require:
      - pkg: mgr_install_docker

mgr_min_salt:
  pkg.installed:
    - pkgs:
      - salt: '>=2016.11.1'
      - salt-minion: '>=2016.11.1'
    - order: last
{% endif %}
07070100000073000081B40000000000000000000000015EA152C4000009B3000000000000000000000000000000000000003400000000susemanager-sls/salt/services/kiwi-image-server.sls   # Image Server installation state - part of SUSE Manager for Retail
#
# Copyright (c) 2017 - 2019 SUSE LLC

{% if pillar['addon_group_types'] is defined and 'osimage_build_host' in pillar['addon_group_types'] %}
{% set kiwi_dir = '/var/lib/Kiwi' %}

# on SLES11 and SLES12 use legacy Kiwi, use Kiwi NG elsewhere
{%- set use_kiwi_ng = not (salt['grains.get']('osfullname') == 'SLES' and salt['grains.get']('osmajorrelease')|int() < 15) %}

{%- if use_kiwi_ng %}
{% set kiwi_boot_modules = ['kiwi-boot-descriptions'] %}

mgr_install_kiwi:
  pkg.installed:
    - pkgs:
      - python3-kiwi
{% for km in kiwi_boot_modules %}
      - {{ km }}
{% endfor %}

{%- else %}
# legacy kiwi

{% set kiwi_boot_modules = ['kiwi-desc-netboot', 'kiwi-desc-saltboot', 'kiwi-desc-vmxboot', 'kiwi-desc-oemboot', 'kiwi-desc-isoboot'] %}
{% set available_packages = salt['pkg.search']('kiwi*').keys() %}

mgr_install_kiwi:
  pkg.installed:
    - pkgs:
      - kiwi
{% for km in kiwi_boot_modules %}
    {% if km in available_packages %}
      - {{ km }}
    {% endif %}
{% endfor %}
{% endif %}

mgr_kiwi_build_tools:
  pkg.installed:
    - pkgs:
      - git-core

mgr_kiwi_dir_created:
  file.directory:
    - name: {{ kiwi_dir }}
    - user: root
    - group: root
    - dir_mode: 755

# repo for common kiwi build needs - mainly RPM with SUSE Manager certificate
mgr_kiwi_dir_repo_created:
  file.directory:
    - name: {{ kiwi_dir }}/repo
    - user: root
    - group: root
    - dir_mode: 755

mgr_osimage_cert_deployed:
  file.managed:
{%- if grains.get('osfullname') == 'SLES' and grains.get('osmajorrelease') == '11' %}
    - name: {{ kiwi_dir }}/repo/rhn-org-trusted-ssl-cert-osimage-sle11-1.0-1.noarch.rpm
    - source: salt://images/rhn-org-trusted-ssl-cert-osimage-sle11-1.0-1.noarch.rpm
{%- else %}
    - name: {{ kiwi_dir }}/repo/rhn-org-trusted-ssl-cert-osimage-1.0-1.noarch.rpm
    - source: salt://images/rhn-org-trusted-ssl-cert-osimage-1.0-1.noarch.rpm
{%- endif %}

mgr_sshd_installed_enabled:
  pkg.installed:
    - name: openssh
  service.running:
    - name: sshd
    - enable: True

mgr_sshd_public_key_copied:
  file.append:
    - name: /root/.ssh/authorized_keys
    - source: salt://salt_ssh/mgr_ssh_id.pub
    - makedirs: True
    - require:
      - pkg: mgr_sshd_installed_enabled

mgr_saltutil_synced:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
  saltutil.sync_all
{%- else %}
  module.run:
    - name: saltutil.sync_all
{%- endif %}

{% endif %}
 07070100000074000081B40000000000000000000000015EA152C4000003EA000000000000000000000000000000000000002E00000000susemanager-sls/salt/services/salt-minion.sls include:
  - bootstrap.remove_traditional_stack

{%- if salt['pillar.get']('contact_method') not in ['ssh-push', 'ssh-push-tunnel'] %}

{# management keys should be used only once #}
{# removed to prevent trouble on the next regular minion restart #}
mgr_remove_management_key_grains:
  file.replace:
    - name: /etc/salt/minion.d/susemanager.conf
    - pattern: '^\s*management_key:.*$'
    - repl: ''
    - onlyif: grep 'management_key:' /etc/salt/minion.d/susemanager.conf

{# activation keys are only usefull on first registration #}
{# removed to prevent trouble on the next regular minion restart #}
mgr_remove_activation_key_grains:
  file.replace:
    - name: /etc/salt/minion.d/susemanager.conf
    - pattern: '^\s*activation_key:.*$'
    - repl: ''
    - onlyif: grep 'activation_key:' /etc/salt/minion.d/susemanager.conf

mgr_salt_minion:
  pkg.installed:
    - name: salt-minion
    - order: last
  service.running:
    - name: salt-minion
    - enable: True
    - order: last
{% endif %}
  07070100000075000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000002300000000susemanager-sls/salt/srvmonitoring    07070100000076000081B40000000000000000000000015EA152C400000812000000000000000000000000000000000000002F00000000susemanager-sls/salt/srvmonitoring/disable.sls    node_exporter_service:
  service.dead:
    - name: prometheus-node_exporter
    - enable: False

postgres_exporter_service:
  service.dead:
    - name: prometheus-postgres_exporter
    - enable: False

{% set remove_jmx_props = {'service': 'tomcat', 'file': '/etc/sysconfig/tomcat'} %}
{%- include 'srvmonitoring/removejmxprops.sls' %}

jmx_tomcat_config:
  cmd.run:
    - name: grep -q -v -- '-Dcom.sun.management.jmxremote.port=3333' /etc/sysconfig/tomcat && grep -q -v -- '-Dcom.sun.management.jmxremote.ssl=false' /etc/sysconfig/tomcat && grep -q -v -- '-Dcom.sun.management.jmxremote.authenticate=false' /etc/sysconfig/tomcat && grep -q -v -- '-Djava.rmi.server.hostname=' /etc/sysconfig/tomcat
    - require:
      - cmd: remove_tomcat_jmx_*

jmx_exporter_tomcat_service:
  service.dead:
    - name: prometheus-jmx_exporter@tomcat
    - enable: False
    - require:
      - cmd: jmx_tomcat_config

{% set remove_jmx_props = {'service': 'taskomatic', 'file': '/etc/rhn/taskomatic.conf'} %}
{%- include 'srvmonitoring/removejmxprops.sls' %}

jmx_taskomatic_config:
  cmd.run:
    - name: grep -q -v -- '-Dcom.sun.management.jmxremote.port=3333' /etc/rhn/taskomatic.conf && grep -q -v -- '-Dcom.sun.management.jmxremote.ssl=false' /etc/rhn/taskomatic.conf && grep -q -v -- '-Dcom.sun.management.jmxremote.authenticate=false' /etc/rhn/taskomatic.conf && grep -q -v -- '-Djava.rmi.server.hostname=' /etc/rhn/taskomatic.conf
    - require:
      - cmd: remove_taskomatic_jmx_*

jmx_exporter_taskomatic_service:
  service.dead:
    - name: prometheus-jmx_exporter@taskomatic
    - enable: False
    - require:
      - cmd: jmx_taskomatic_config

mgr_enable_prometheus_self_monitoring:
  cmd.run:
    - name: grep -q '^prometheus_monitoring_enabled.*=.*' /etc/rhn/rhn.conf && sed -i 's/^prometheus_monitoring_enabled.*/prometheus_monitoring_enabled = 0/' /etc/rhn/rhn.conf || echo 'prometheus_monitoring_enabled = 0' >> /etc/rhn/rhn.conf

mgr_is_prometheus_self_monitoring_disabled:
  cmd.run:
    - name: grep -qF 'prometheus_monitoring_enabled = 0' /etc/rhn/rhn.conf  07070100000077000081B40000000000000000000000015EA152C400000F24000000000000000000000000000000000000002E00000000susemanager-sls/salt/srvmonitoring/enable.sls node_exporter:
  cmd.run:
    - name: /usr/bin/rpm --query --info golang-github-prometheus-node_exporter

node_exporter_service:
  service.running:
    - name: prometheus-node_exporter
    - enable: True
    - require:
      - cmd: node_exporter

postgres_exporter:
  cmd.run:
    - name: /usr/bin/rpm --query --info golang-github-wrouesnel-postgres_exporter

postgres_exporter_configuration:
  file.managed:
    - name: /etc/postgres_exporter/postgres_exporter_queries.yaml
    - makedirs: True
    - source:
      - salt://srvmonitoring/postgres_exporter_queries.yaml
    - user: root
    - group: root
    - mode: 644

postgres_exporter_service:
  file.managed:
    - name: /etc/sysconfig/prometheus-postgres_exporter
    - source: salt://srvmonitoring/prometheus-postgres_exporter
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - require:
      - cmd: postgres_exporter
      - file: postgres_exporter_configuration
  service.running:
    - name: prometheus-postgres_exporter
    - enable: True
    - require:
      - file: postgres_exporter_service
    - watch:
      - file: postgres_exporter_configuration

jmx_exporter:
  cmd.run:
    - name: /usr/bin/rpm --query --info prometheus-jmx_exporter prometheus-jmx_exporter-tomcat

{% set remove_jmx_props = {'service': 'tomcat', 'file': '/etc/sysconfig/tomcat'} %}
{%- include 'srvmonitoring/removejmxprops.sls' %}

jmx_tomcat_config:
  cmd.run:
    - name: sed -i 's/JAVA_OPTS="\(.*\)"/JAVA_OPTS="\1 -Dcom.sun.management.jmxremote.port=3333 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Djava.rmi.server.hostname={{ grains['fqdns'][0] }}"/' /etc/sysconfig/tomcat
    - require:
      - cmd: remove_tomcat_jmx_*

jmx_exporter_tomcat_service:
  service.running:
    - name: prometheus-jmx_exporter@tomcat
    - enable: True
    - require:
      - cmd: jmx_exporter
      - cmd: jmx_tomcat_config

jmx_exporter_taskomatic_systemd_config:
  file.managed:
    - name: /etc/prometheus-jmx_exporter/taskomatic/environment
    - makedirs: True
    - user: root
    - group: root
    - mode: 644
    - contents: |
        PORT="5557"
        EXP_PARAMS=""

{% set remove_jmx_props = {'service': 'taskomatic', 'file': '/etc/rhn/taskomatic.conf'} %}
{%- include 'srvmonitoring/removejmxprops.sls' %}

jmx_taskomatic_config:
  cmd.run:
    - name: sed -i 's/JAVA_OPTS="\(.*\)"/JAVA_OPTS="\1 -Dcom.sun.management.jmxremote.port=3334 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Djava.rmi.server.hostname={{ grains['fqdns'][0] }}"/' /etc/rhn/taskomatic.conf
    - require:
      - cmd: remove_taskomatic_jmx_*

jmx_exporter_taskomatic_yaml_config:
  file.managed:
    - name: /etc/prometheus-jmx_exporter/taskomatic/prometheus-jmx_exporter.yml
    - makedirs: True
    - user: root
    - group: root
    - mode: 644
    - contents: |
        hostPort: localhost:3334
        username:
        password:
        whitelistObjectNames:
          - java.lang:type=Threading,*
          - java.lang:type=Memory,*
          - Catalina:type=ThreadPool,name=*
        rules:
        - pattern: ".*"

jmx_exporter_taskomatic_service:
  service.running:
    - name: prometheus-jmx_exporter@taskomatic
    - enable: True
    - require:
      - cmd: jmx_exporter
      - cmd: jmx_taskomatic_config
      - file: jmx_exporter_taskomatic_systemd_config
      - file: jmx_exporter_taskomatic_yaml_config

mgr_enable_prometheus_self_monitoring:
  cmd.run:
    - name: grep -q '^prometheus_monitoring_enabled.*=.*' /etc/rhn/rhn.conf && sed -i 's/^prometheus_monitoring_enabled.*/prometheus_monitoring_enabled = 1/' /etc/rhn/rhn.conf || echo 'prometheus_monitoring_enabled = 1' >> /etc/rhn/rhn.conf

mgr_is_prometheus_self_monitoring_enabled:
  cmd.run:
    - name: grep -qF 'prometheus_monitoring_enabled = 1' /etc/rhn/rhn.conf

07070100000078000081B40000000000000000000000015EA152C40000044E000000000000000000000000000000000000004200000000susemanager-sls/salt/srvmonitoring/postgres_exporter_queries.yaml mgr_serveractions:
  query: |
    SELECT (
      SELECT COUNT(*)
        FROM rhnServerAction
        WHERE status = (
          SELECT id FROM rhnActionStatus WHERE name = 'Queued'
       )
    ) AS queued,
    (
      SELECT COUNT(*)
        FROM rhnServerAction
        WHERE status = (
          SELECT id FROM rhnActionStatus WHERE name = 'Picked Up'
       )
    ) AS picked_up,
    (
      SELECT COUNT(*)
        FROM rhnServerAction
        WHERE status = (
          SELECT id FROM rhnActionStatus WHERE name IN ('Completed')
       )
    ) AS completed,
    (
      SELECT COUNT(*)
        FROM rhnServerAction
        WHERE status = (
          SELECT id FROM rhnActionStatus WHERE name IN ('Failed')
       )
    ) AS failed;
  metrics:
    - queued:
        usage: "GAUGE"
        description: "Count of queued Actions"
    - picked_up:
        usage: "GAUGE"
        description: "Count of picked up Actions"
    - completed:
        usage: "COUNTER"
        description: "Count of completed Actions"
    - failed:
        usage: "COUNTER"
        description: "Count of failed Actions"
  07070100000079000081B40000000000000000000000015EA152C400000324000000000000000000000000000000000000004000000000susemanager-sls/salt/srvmonitoring/prometheus-postgres_exporter   ## Path:           Applications/PostgreSQLExporter
## Description:    Prometheus exporter for PostgreSQL
## Type:           string()
## Default:        "postgresql://user:passwd@localhost:5432/database?sslmode=disable"
## ServiceRestart: postgres-exporter
#
# Connection URL to postgresql instance
#
DATA_SOURCE_NAME="postgresql://{{ pillar['db_user'] }}:{{ pillar['db_pass'] }}@{{ pillar['db_host'] }}:{{ pillar['db_port'] }}/{{ pillar['db_name'] }}?sslmode=disable"

## Path:           Applications/PostgreSQLExporter
## Description:    Prometheus exporter for PostgreSQL
## Type:           string()
## Default:        ""
## ServiceRestart: postgres-exporter
#
# Extra options for postgres-exporter
#
POSTGRES_EXPORTER_PARAMS="--extend.query-path /etc/postgres_exporter/postgres_exporter_queries.yaml"
0707010000007A000081B40000000000000000000000015EA152C400000481000000000000000000000000000000000000003600000000susemanager-sls/salt/srvmonitoring/removejmxprops.sls remove_{{remove_jmx_props.service}}_jmx_port:
  cmd.run:
    - name: sed -ri 's/JAVA_OPTS="(.*)-Dcom\.sun\.management\.jmxremote\.port=[0-9]*(.*)"/JAVA_OPTS="\1 \2"/' {{remove_jmx_props.file}}
    - onlyif: grep -E -- '-Dcom\.sun\.management\.jmxremote\.port=[0-9]+' {{remove_jmx_props.file}}

remove_{{remove_jmx_props.service}}_jmx_ssl:
  cmd.run:
    - name: sed -i 's/JAVA_OPTS="\(.*\)-Dcom\.sun\.management\.jmxremote\.ssl=false\(.*\)"/JAVA_OPTS="\1 \2"/' {{remove_jmx_props.file}}
    - onlyif: grep -F -- '-Dcom.sun.management.jmxremote.ssl=false' {{remove_jmx_props.file}}

remove_{{remove_jmx_props.service}}_jmx_auth:
  cmd.run:
    - name: sed -i 's/JAVA_OPTS="\(.*\)-Dcom\.sun\.management\.jmxremote\.authenticate=false\(.*\)"/JAVA_OPTS="\1 \2"/' {{remove_jmx_props.file}}
    - onlyif: grep -F -- '-Dcom.sun.management.jmxremote.authenticate=false' {{remove_jmx_props.file}}

remove_{{remove_jmx_props.service}}_jmx_hostname:
  cmd.run:
    - name: sed -ri 's/JAVA_OPTS="(.*)-Djava\.rmi\.server\.hostname=\S*(.*)"/JAVA_OPTS="\1 \2"/' {{remove_jmx_props.file}}
    - onlyif: grep -F -- '-Djava.rmi.server.hostname=' {{remove_jmx_props.file}}   0707010000007B000081B40000000000000000000000015EA152C4000004DE000000000000000000000000000000000000002E00000000susemanager-sls/salt/srvmonitoring/status.sls jmx_taskomatic_exporter_service:
  module.run:
    - name: service.status
    - m_name: "prometheus-jmx_exporter@taskomatic.service"

jmx_tomcat_exporter_service:
  module.run:
    - name: service.status
    - m_name: "prometheus-jmx_exporter@tomcat.service"

node_exporter_service:
  module.run:
    - name: service.status
    - m_name: "prometheus-node_exporter.service"

postgres_exporter_service:
  module.run:
    - name: service.status
    - m_name: "prometheus-postgres_exporter.service"

jmx_tomcat_java_config:
  module.run:
    - name: file.search
    - path: /etc/sysconfig/tomcat
    - pattern: "-Dcom\\.sun\\.management\\.jmxremote\\.port=3333 -Dcom\\.sun\\.management\\.jmxremote\\.ssl=false -Dcom\\.sun\\.management\\.jmxremote\\.authenticate=false -Djava\\.rmi\\.server\\.hostname="

jmx_taskomatic_java_config:
  module.run:
    - name: file.search
    - path: /etc/rhn/taskomatic.conf
    - pattern: "-Dcom\\.sun\\.management\\.jmxremote\\.port=3334 -Dcom\\.sun\\.management\\.jmxremote\\.ssl=false -Dcom\\.sun\\.management\\.jmxremote\\.authenticate=false -Djava\\.rmi\\.server\\.hostname="

mgr_is_prometheus_self_monitoring_enabled:
  cmd.run:
    - name: grep -q 'prometheus_monitoring_enabled\s*=\s*1\s*$' /etc/rhn/rhn.conf  0707010000007C000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000002300000000susemanager-sls/salt/ssh_bootstrap    0707010000007D000081B40000000000000000000000015EA152C4000007CB000000000000000000000000000000000000002C00000000susemanager-sls/salt/ssh_bootstrap/init.sls   mgr_ssh_identity:
  ssh_auth.present:
    - user: {{ salt['pillar.get']('mgr_sudo_user') or 'root' }}
    - source: salt://salt_ssh/mgr_ssh_id.pub
{% if salt['pillar.get']('contact_method') == 'ssh-push-tunnel' %}
mgr_server_localhost_alias_present:
  host.present:
{% else %}
mgr_server_localhost_alias_absent:
  host.absent:
{% endif %}
    - ip:
      - 127.0.0.1
    - names:
      - {{ salt['pillar.get']('mgr_server') }}

{%- if salt['pillar.get']('proxy_pub_key') and salt['pillar.get']('contact_method') == 'ssh-push-tunnel' %}
no_push_key_authorized:
  ssh_auth.absent:
    - user: {{ salt['pillar.get']('mgr_sudo_user') or 'root' }}
    - comment: susemanager-ssh-push

proxy_ssh_identity:
  ssh_auth.present:
    - user: {{ salt['pillar.get']('mgr_sudo_user') or 'root' }}
    - source: salt://salt_ssh/{{ salt['pillar.get']('proxy_pub_key') }}
    - require:
      - ssh_auth: no_push_key_authorized
{%- endif %}

{%- if salt['pillar.get']('mgr_sudo_user') and salt['pillar.get']('mgr_sudo_user') != 'root' %}
{%- set home = '/home/' ~ salt['pillar.get']('mgr_sudo_user') %}
{%- else %}
{%- set home = '/root' %}
{%- endif %}

generate_own_ssh_key:
  cmd.run:
    - name: ssh-keygen -N '' -C 'susemanager-own-ssh-push' -f {{ home }}/.ssh/mgr_own_id -t rsa -q
    - creates: {{ home }}/.ssh/mgr_own_id.pub

ownership_own_ssh_key:
  file.managed:
    - name: {{ home }}/.ssh/mgr_own_id
    - user: {{ salt['pillar.get']('mgr_sudo_user') or 'root' }}
    - require:
      - cmd: generate_own_ssh_key

no_own_key_authorized:
  ssh_auth.absent:
    - user: {{ salt['pillar.get']('mgr_sudo_user') or 'root' }}
    - comment: susemanager-own-ssh-push
    - require:
      - file: ownership_own_ssh_key

authorize_own_key:
  ssh_auth.present:
    - user: {{ salt['pillar.get']('mgr_sudo_user') or 'root' }}
    - source: {{ home }}/.ssh/mgr_own_id.pub
    - require:
      - file: ownership_own_ssh_key
      - ssh_auth: no_own_key_authorized
include:
  - bootstrap.remove_traditional_stack
 0707010000007E000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001A00000000susemanager-sls/salt/util 0707010000007F000081B40000000000000000000000015EA152C400000113000000000000000000000000000000000000003600000000susemanager-sls/salt/util/mgr_disable_fqdns_grain.sls mgr_disable_fqdns_grains:
  file.append:
    - name: /etc/salt/minion.d/susemanager.conf
    - text: "enable_fqdns_grains: False"

mgr_salt_minion:
  service.running:
   - name: salt-minion
   - enable: True
   - order: last
   - watch:
     - file: mgr_disable_fqdns_grains
 07070100000080000081B40000000000000000000000015EA152C400000168000000000000000000000000000000000000003700000000susemanager-sls/salt/util/mgr_mine_config_clean_up.sls    {%- if salt['pillar.get']('contact_method') not in ['ssh-push', 'ssh-push-tunnel'] %}
mgr_disable_mine:
  file.managed:
    - name: /etc/salt/minion.d/susemanager-mine.conf
    - contents: "mine_enabled: False"

mgr_salt_minion:
  service.running:
   - name: salt-minion
   - enable: True
   - order: last
   - watch:
     - file: mgr_disable_mine
{% endif %}
07070100000081000081B40000000000000000000000015EA152C4000000CC000000000000000000000000000000000000003500000000susemanager-sls/salt/util/mgr_start_event_grains.sls  mgr_start_event_grains:
  file.append:
    - name: /etc/salt/minion.d/susemanager.conf
    - text: |
        start_event_grains:
          - machine_id
          - saltboot_initrd
          - susemanager
07070100000082000081B40000000000000000000000015EA152C40000001B000000000000000000000000000000000000002300000000susemanager-sls/salt/util/noop.sls    mgr_do_nothing:
  test.nop
 07070100000083000081B40000000000000000000000015EA152C4000000B6000000000000000000000000000000000000002A00000000susemanager-sls/salt/util/syncbeacons.sls sync_beacons:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
  saltutil.sync_beacons
{%- else %}
  module.run:
    - name: saltutil.sync_beacons
{%- endif %}
  07070100000084000081B40000000000000000000000015EA152C40000005B000000000000000000000000000000000000002C00000000susemanager-sls/salt/util/synccustomall.sls   include:
  - util.syncmodules
  - util.syncstates
  - util.syncgrains
  - util.syncbeacons
 07070100000085000081B40000000000000000000000015EA152C4000000CE000000000000000000000000000000000000002900000000susemanager-sls/salt/util/syncgrains.sls  sync_grains:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
  saltutil.sync_grains:
{%- else %}
  module.run:
    - name: saltutil.sync_grains
{%- endif %}
    - reload_grains: true
  07070100000086000081B40000000000000000000000015EA152C4000000B6000000000000000000000000000000000000002A00000000susemanager-sls/salt/util/syncmodules.sls sync_modules:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
  saltutil.sync_modules
{%- else %}
  module.run:
    - name: saltutil.sync_modules
{%- endif %}
  07070100000087000081B40000000000000000000000015EA152C4000000B6000000000000000000000000000000000000002900000000susemanager-sls/salt/util/syncstates.sls  sync_states:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
  saltutil.sync_states
{%- else %}
  module.run:
      - name: saltutil.sync_states
{%- endif %}

  07070100000088000081B40000000000000000000000015EA152C4000000E9000000000000000000000000000000000000002900000000susemanager-sls/salt/util/systeminfo.sls  include:
  - util.syncmodules
  - util.syncstates
  - util.syncgrains
  - util.syncbeacons
status_uptime:
  module.run:
    - name: status.uptime
grains_update:
  module.run:
    - name: grains.item
    - args:
      - kernelrelease
   07070100000089000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001A00000000susemanager-sls/salt/virt 0707010000008A000081B40000000000000000000000015EA152C4000005F9000000000000000000000000000000000000002800000000susemanager-sls/salt/virt/create-vm.sls   domain_define:
    virt.running:
        - name: {{ pillar['name'] }}
        - cpu: {{ pillar['vcpus'] }}
        - mem: {{ pillar['mem'] // 1024 }}
        - os_type: {{ pillar['os_type'] }}
        - arch: {{ pillar['arch'] }}
        - vm_type: {{ pillar['vm_type'] }}
        - disks:
{% for disk in pillar['disks'] %}
            - name: {{ disk['name'] }}
              model: {{ disk['model'] }}
    {% if 'device' in disk %}
              device: {{ disk['device'] }}
    {% endif %}
    {% if 'type' in disk %}
              type: {{ disk['type'] }}
    {% endif %}
    {% if 'format' in disk %}
              format: {{ disk['format'] }}
    {% endif %}
    {% if 'source_file' in disk %}
              source_file: {{ disk['source_file'] if disk['source_file'] != '' else 'null' }}
    {% endif %}
    {% if 'pool' in disk %}
              pool: {{ disk['pool'] }}
    {% endif %}
    {% if 'size' in disk %}
              size: {{ disk['size'] }}
    {% endif %}
    {% if 'image' in disk %}
              image: {{ disk['image'] }}
    {% endif %}
{% endfor %}
{% if 'interfaces' in pillar %}
        - interfaces:
    {% for nic in pillar['interfaces'] %}
            - name: {{ nic['name'] }}
              type: {{ nic['type'] }}
              source: {{ nic['source'] }}
        {% if 'mac' in nic %}
              mac: {{ nic['mac'] if nic['mac'] != '' else 'null' }}
        {% endif %}
    {% endfor %}
{% endif %}
        - graphics:
            type: {{ pillar['graphics']['type'] }}
        - seed: False
   0707010000008B000081B40000000000000000000000015EA152C4000000CF000000000000000000000000000000000000002600000000susemanager-sls/salt/virt/deleted.sls vm_stopped:
  virt.powered_off:
    - name: {{ pillar['domain_name'] }}

mgr_virt_destroy:
  module.run:
    - name: virt.purge
    - vm_: {{ pillar['domain_name'] }}
    - require:
      - virt: vm_stopped
 0707010000008C000081B40000000000000000000000015EA152C400000113000000000000000000000000000000000000002C00000000susemanager-sls/salt/virt/engine-events.sls   {% if pillar['virt_entitled'] %}
/etc/salt/minion.d/libvirt-events.conf:
  file.managed:
    - contents: |
        engines:
          - libvirt_events

/var/cache/virt_state.cache:
  file.absent

{% else %}

/etc/salt/minion.d/libvirt-events.conf:
  file.absent

{% endif %}
 0707010000008D000081B40000000000000000000000015EA152C4000000B6000000000000000000000000000000000000002400000000susemanager-sls/salt/virt/reset.sls   powered_off:
  virt.powered_off:
    - name: {{ pillar['domain_name'] }}

restarted:
  virt.running:
    - name: {{ pillar['domain_name'] }}
    - require:
      - virt: powered_off
  0707010000008E000081B40000000000000000000000015EA152C40000005E000000000000000000000000000000000000002600000000susemanager-sls/salt/virt/resumed.sls mgr_virt_resume:
  module.run:
    - name: virt.resume
    - vm_: {{ pillar['domain_name'] }}
  0707010000008F000081B40000000000000000000000015EA152C40000009F000000000000000000000000000000000000002500000000susemanager-sls/salt/virt/setmem.sls  mgr_virt_mem:
  module.run:
    - name: virt.setmem
    - vm_: {{ pillar['domain_name'] }}
    - memory: {{ pillar['domain_mem'] // 1024 }}
    - config: True
 07070100000090000081B40000000000000000000000015EA152C40000009C000000000000000000000000000000000000002700000000susemanager-sls/salt/virt/setvcpus.sls    mgr_virt_vcpus:
  module.run:
    - name: virt.setvcpus
    - vm_: {{ pillar['domain_name'] }}
    - vcpus: {{ pillar['domain_vcpus'] }}
    - config: True
07070100000091000081B40000000000000000000000015EA152C400000043000000000000000000000000000000000000002A00000000susemanager-sls/salt/virt/statechange.sls {{ pillar['domain_name'] }}:
    virt.{{ pillar['domain_state'] }}
 07070100000092000081B40000000000000000000000015EA152C40000005E000000000000000000000000000000000000002800000000susemanager-sls/salt/virt/suspended.sls   mgr_virt_suspend:
  module.run:
    - name: virt.pause
    - vm_: {{ pillar['domain_name'] }}
  07070100000093000081B40000000000000000000000015EA152C4000005EB000000000000000000000000000000000000002800000000susemanager-sls/salt/virt/update-vm.sls   domain_update:
    module.run:
        - name: virt.update
        - m_name: {{ pillar['name'] }}
        - cpu: {{ pillar['vcpus'] }}
        - mem: {{ pillar['mem'] // 1024 }}
{% if 'disks' in pillar %}
        - disks:
    {% for disk in pillar['disks'] %}
            - name: {{ disk['name'] }}
              model: {{ disk['model'] }}
        {% if 'device' in disk %}
              device: {{ disk['device'] }}
        {% endif %}
        {% if 'type' in disk %}
              type: {{ disk['type'] }}
        {% endif %}
        {% if 'format' in disk %}
              format: {{ disk['format'] }}
        {% endif %}
        {% if 'source_file' in disk %}
              source_file: {{ disk['source_file'] if disk['source_file'] != '' else 'null' }}
        {% endif %}
        {% if 'pool' in disk %}
              pool: {{ disk['pool'] }}
        {% endif %}
        {% if 'size' in disk %}
              size: {{ disk['size'] }}
        {% endif %}
        {% if 'image' in disk %}
              image: {{ disk['image'] }}
        {% endif %}
    {% endfor %}
{% endif %}
{% if 'interfaces' in pillar %}
        - interfaces:
    {% for nic in pillar['interfaces'] %}
            - name: {{ nic['name'] }}
              type: {{ nic['type'] }}
              source: {{ nic['source'] }}
        {% if 'mac' in nic %}
              mac: {{ nic['mac'] if nic['mac'] != '' else 'null' }}
        {% endif %}
    {% endfor %}
{% endif %}
        - graphics:
            type: {{ pillar['graphics']['type'] }}
 07070100000094000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001500000000susemanager-sls/scap  07070100000095000081B40000000000000000000000015EA152C400001532000000000000000000000000000000000000002A00000000susemanager-sls/scap/xccdf-resume.xslt.in <?xml version="1.0" encoding="UTF-8"?>
<!-- Copyright 2012 Red Hat Inc., Durham, North Carolina. All Rights Reserved.

This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the Free
Software Foundation; either version 2.1 of the License.

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for more
details.

You should have received a copy of the GNU Lesser General Public License along
with this library; if not, write to the Free Software Foundation, Inc., 59
Temple Place, Suite 330, Boston, MA  02111-1307 USA

Authors:
     Simon Lukasik <slukasik@redhat.com>
-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0"
    xmlns:cdf1="http://checklists.nist.gov/xccdf/1.1"
    xmlns:cdf2="http://checklists.nist.gov/xccdf/1.2">
    <xsl:output method="xml" encoding="UTF-8"/>

    <xsl:template match="/">
        <benchmark-resume>
            <xsl:apply-templates select="*[local-name()='Benchmark']"/>
        </benchmark-resume>
    </xsl:template>

    <xsl:template match="cdf1:Benchmark | cdf2:Benchmark">
        <xsl:copy-of select="@id"/>
        <xsl:attribute name="version">
            <xsl:value-of select="normalize-space(cdf1:version/text()|cdf2:version/text())"/>
        </xsl:attribute>

        <xsl:variable name="profileId" select="cdf1:TestResult[1]/cdf1:profile/@idref | cdf2:TestResult[1]/cdf2:profile/@idref"/>
        <xsl:choose>
            <xsl:when test="not($profileId)"/> <!-- Do not send profile element when scanning with 'default' profile. -->
            <xsl:when test="cdf1:Profile[@id = $profileId] | cdf2:Profile[@id = $profileId]">
                <xsl:apply-templates select="cdf1:Profile[@id = $profileId] | cdf2:Profile[@id = $profileId]"/>
            </xsl:when>
            <xsl:otherwise>
                <profile title="Tailored profile">
                    <xsl:attribute name="id">
                         <xsl:value-of select="$profileId"/>
                    </xsl:attribute>
                </profile>
            </xsl:otherwise>
        </xsl:choose>
        <xsl:apply-templates select="cdf1:TestResult[1] | cdf2:TestResult[1]"/>
    </xsl:template>

    <xsl:template match="cdf1:Profile | cdf2:Profile">
        <profile>
            <xsl:attribute name="title">
                <xsl:value-of select="normalize-space(cdf1:title/text() | cdf2:title/text())"/>
            </xsl:attribute>
            <xsl:copy-of select="@id"/>
            <xsl:attribute name="description">
                <xsl:value-of select="normalize-space(cdf1:description[@xml:lang='en-US']/text() | cdf2:description[@xml:lang='en-US']/text())"/>
            </xsl:attribute>
        </profile>
    </xsl:template>

    <xsl:template match="cdf1:TestResult | cdf2:TestResult">
        <TestResult>
            <xsl:copy-of select="@id"/>
            <xsl:copy-of select="@start-time"/>
            <xsl:copy-of select="@end-time"/>
            <pass>
                    <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'pass'] | cdf2:rule-result[cdf2:result = 'pass']"/>
            </pass>
            <fail>
                    <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'fail'] | cdf2:rule-result[cdf2:result = 'fail']"/>
            </fail>
            <error>
                    <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'error'] | cdf2:rule-result[cdf2:result = 'error']"/>
            </error>
            <unknown>
                    <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'unknown'] | cdf2:rule-result[cdf2:result = 'unknown']"/>
            </unknown>
            <notapplicable>
                    <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'notapplicable'] | cdf2:rule-result[cdf2:result = 'notapplicable']"/>
            </notapplicable>
            <notchecked>
                    <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'notchecked'] | cdf2:rule-result[cdf2:result = 'notchecked']"/>
            </notchecked>
            <notselected>
                    <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'notselected'] | cdf2:rule-result[cdf2:result = 'notselected']"/>
            </notselected>
            <informational>
                   <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'informational'] | cdf2:rule-result[cdf2:result = 'informational']"/>
            </informational>
            <fixed>
                    <xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'fixed'] | cdf2:rule-result[cdf2:result = 'fixed']"/>
            </fixed>
        </TestResult>
    </xsl:template>

    <xsl:template match="cdf1:rule-result | cdf2:rule-result">
        <rr>
            <xsl:attribute name="id">
                <xsl:value-of select="normalize-space(@idref)"/>
            </xsl:attribute>
            <xsl:apply-templates select="cdf1:ident | cdf2:ident"/>
        </rr>
    </xsl:template>

    <xsl:template match="cdf1:ident | cdf2:ident">
        <ident>
            <xsl:copy-of select="@system"/>
            <xsl:value-of select="normalize-space(text())"/>
        </ident>
    </xsl:template>
</xsl:stylesheet>
  07070100000096000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001400000000susemanager-sls/src   07070100000097000081B40000000000000000000000015EA152C4000001B2000000000000000000000000000000000000001E00000000susemanager-sls/src/README.md ## Python Code Maintenance

Test are written with PyTest. This way:

1. Create your "test_foo.py" file.

2. Import with double-dot your package,
   so it will be included in the sys path, e.g.:

   from ..beacons import pkgset

3. Create a test function "def test_my_foo(..."

4. Rock-n-roll by simply calling "py.test".


Don't mind `.cache` and `__pycache__` directories,
they are ignored in an explicit `.gitignore`.

Have fun. :)
  07070100000098000081B40000000000000000000000015EA152C400000000000000000000000000000000000000000000002000000000susemanager-sls/src/__init__.py   07070100000099000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001C00000000susemanager-sls/src/beacons   0707010000009A000081B40000000000000000000000015EA152C400000000000000000000000000000000000000000000002800000000susemanager-sls/src/beacons/__init__.py   0707010000009B000081B40000000000000000000000015EA152C4000006C4000000000000000000000000000000000000002600000000susemanager-sls/src/beacons/pkgset.py # -*- coding: utf-8 -*-
'''
Watch libzypp/RPM database via cookies and fire
an event to the SUSE Manager if that has been changed.

Author: Bo Maryniuk <bo@suse.de>
'''

from __future__ import absolute_import
import os
import logging
log = logging.getLogger(__name__)


__virtualname__ = 'pkgset'


def __virtual__():
    '''
    Load this beacon only when one of the SUSE Manager zypp/yum commit
    plugins is installed on the minion.
    '''
    plugin_paths = (
        "/usr/lib/zypp/plugins/commit/susemanager",     # Remove this once 2015.8.7 not in use
        "/usr/lib/zypp/plugins/commit/zyppnotify",
        "/usr/share/yum-plugins/susemanagerplugin.py",  # Remove this once 2015.8.7 not in use
        "/usr/share/yum-plugins/yumnotify.py",
    )
    if any(os.path.exists(path) for path in plugin_paths):
        return __virtualname__
    return False


def validate(config):
    '''
    Validate the beacon configuration.

    A non-empty "cookie" file path is mandatory.
    '''
    if config.get('cookie'):
        return True, 'Configuration validated'
    return False, 'Cookie path has not been set.'


def beacon(config):
    '''
    Watch the cookie file written by libzypp's plugin and fire an event
    to the Master when its content changes.

    Example Config

    .. code-block:: yaml

        beacons:
          pkgset:
            cookie: /path/to/cookie/file
            interval: 5

    '''
    events = []
    cookie_path = config.get('cookie', '')
    if not os.path.exists(cookie_path):
        return events

    with open(cookie_path) as handle:
        current = handle.read().strip()

    # Seed the context on first run so no event fires until a real change.
    if __virtualname__ not in __context__:
        __context__[__virtualname__] = current
    if __context__[__virtualname__] != current:
        events.append({
            'tag': 'changed'
        })
        __context__[__virtualname__] = current

    return events
0707010000009C000081B40000000000000000000000015EA152C4000033A6000000000000000000000000000000000000002A00000000susemanager-sls/src/beacons/virtpoller.py # -*- coding: utf-8 -*-
#
# Copyright (c) 2008--2014 Red Hat, Inc.
# Copyright (c) 2016 SUSE LLC
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
'''
Watch libvirt and fire events with changes to virtual machines

Author: Michael Calmer <mc@suse.com>
'''

from __future__ import absolute_import
import sys
import os
import logging
log = logging.getLogger(__name__)

try:
    import libvirt  # pylint: disable=import-error
    from libvirt import libvirtError
    HAS_LIBVIRT = True
except ImportError:
    HAS_LIBVIRT = False
    libvirt = None


try:
    import cPickle as pickle
except ImportError:
    import pickle
import time
import traceback
import binascii

# Default on-disk location of the pickled domain-state snapshot.
CACHE_DATA_PATH = '/var/cache/virt_state.cache'
# Default cache lifetime; after this a full report is forced.
CACHE_EXPIRE_SECS = 60 * 60 * 6   # 6 hours, in seconds

##
# This structure maps the libvirt state enumeration to labels that
# SUSE Manager understands.  It is indexed by the integer state code
# from virDomainInfo (domain.info()[0]).
# Reasons we don't care about differences between NOSTATE, RUNNING and BLOCKED:
# 1. technically, the domain is still "running"
# 2. RHN Classic / Red Hat Satellite / Spacewalk are not able to
#    display 'blocked' & 'nostate' as valid states
# 3. to avoid 'Abuse of Service' messages: bugs #230106 and #546676

VIRT_STATE_NAME_MAP = ( 'running',  # VIR_DOMAIN_NOSTATE
                        'running',  # VIR_DOMAIN_RUNNING
                        'running',  # VIR_DOMAIN_BLOCKED
                        'paused',   # VIR_DOMAIN_PAUSED
                        'stopped',  # VIR_DOMAIN_SHUTDOWN
                        'stopped',  # VIR_DOMAIN_SHUTOFF
                        'crashed')  # VIR_DOMAIN_CRASHED

class EventType:
    # Kinds of events reported in a beacon 'plan'.
    EXISTS      = 'exists'       # domain added or modified
    REMOVED     = 'removed'      # domain no longer present
    FULLREPORT  = 'fullreport'   # complete re-report after cache expiry

class TargetType:
    # What kind of object an event refers to.
    SYSTEM      = 'system'
    DOMAIN      = 'domain'

class VirtualizationType:
    # Virtualization flavour of a domain.
    PARA  = 'para_virtualized'
    FULLY = 'fully_virtualized'

class PropertyType:
    # Keys used in the per-domain properties dictionaries.
    NAME        = 'name'
    UUID        = 'uuid'
    TYPE        = 'virt_type'
    MEMORY      = 'memory_size'
    VCPUS       = 'vcpus'
    STATE       = 'state'
    IDENTITY    = 'identity'
    ID          = 'id'
    MESSAGE     = 'message'

# Name under which Salt loads this beacon.
__virtualname__ = 'virtpoller'


###############################################################################
# PollerStateCache Class
###############################################################################

class PollerStateCache:
    """
    Disk-backed snapshot of the last polled set of libvirt domains.

    On construction the previous snapshot is unpickled from ``cache_file``
    and diffed against the freshly polled ``domain_data``; the differences
    are exposed via get_added()/get_removed()/get_modified().  Call save()
    to persist the new snapshot for the next poll.
    """

    ###########################################################################
    # Public Interface
    ###########################################################################

    def __init__(self, domain_data, cache_file = CACHE_DATA_PATH,
            expire_time = CACHE_EXPIRE_SECS):
        """
        This method creates a new poller state based on the provided domain
        list.  The domain_data list should be in the form returned from
        poller.poll_hypervisor.  That is,

             { uuid : { 'name'        : '...',
                        'uuid'        : '...',
                        'virt_type'   : '...',
                        'memory_size' : '...',
                        'vcpus'       : '...',
                        'state'       : '...' }, ... }

        NOTE(review): the ``expire_time`` argument is unconditionally
        overwritten by _load_state() below (either with the absolute stamp
        from the cache file or with None), so it appears to have no effect
        — confirm whether that is intended.
        """
        self.__expire_time = expire_time
        self.__cache_file = cache_file

        # Start by loading the old state, if necessary.
        # Side effect: sets self.__old_domain_data and replaces
        # self.__expire_time (see NOTE above).
        self._load_state()
        self.__new_domain_data = domain_data

        # Now compare the given domain_data against the one loaded in the old
        # state.
        self._compare_domain_data()

        log.debug("Added: %s"    % repr(self.__added))
        log.debug("Removed: %s"  % repr(self.__removed))
        log.debug("Modified: %s" % repr(self.__modified))

    def save(self):
        """
        Updates the cache on disk with the latest domain data.
        """
        self._save_state()

    def is_expired(self):
        """
        Returns true if this cache is expired.

        ``__expire_time`` is None when no previous cache existed, and an
        absolute epoch timestamp when it was loaded from disk.
        """
        if self.__expire_time is None:
            return False
        else:
            return int(time.time()) >= self.__expire_time

    def is_changed(self):
        # Truthy when any domain was added, removed or modified since the
        # last poll.  Note: returns one of the diff dicts (possibly empty),
        # not a strict bool — callers only use it in boolean context.
        return self.__added or self.__removed or self.__modified

    def get_added(self):
        """
        Returns a list of uuids for each domain that has been added since the
        last state poll.
        """
        return self.__added

    def get_modified(self):
        """
        Returns a list of uuids for each domain that has been modified since
        the last state poll.
        """
        return self.__modified

    def get_removed(self):
        """
        Returns a list of uuids for each domain that has been removed since
        the last state poll.
        """
        return self.__removed

    ###########################################################################
    # Helper Methods
    ###########################################################################

    def _load_state(self):
        """
        Loads the last hypervisor state from disk.

        Sets self.__old_domain_data (None when absent/expired/corrupt) and
        self.__expire_time (absolute epoch stamp from the file, or None).
        An expired cache file is deleted so the next save starts fresh.
        """
        # Attempt to open up the cache file.
        cache_file = None
        try:
            cache_file = open(self.__cache_file, 'rb')
        except IOError as ioe:
            # Couldn't open the cache file.  That's ok, there might not be one.
            # We'll only complain if debugging is enabled.
            log.debug("Could not open cache file '{0}': {1}".format(
                self.__cache_file, str(ioe)))

        # Now, if a previous state was cached, load it.
        state = {}
        if cache_file:
            try:
                state = pickle.load(cache_file)
            except pickle.PickleError as pe:
                # Strange.  Possibly, the file is corrupt.  We'll load an empty
                # state instead.
                log.debug("Error occurred while loading state: {0}".format(str(pe)))
            except EOFError:
                log.debug("Unexpected EOF. Probably an empty file.")
                cache_file.close()

            # NOTE(review): close() is reached a second time after the
            # EOFError branch above; closing twice is harmless in Python.
            cache_file.close()

        if state:
            log.debug("Loaded state: {0}".format(repr(state)))

            self.__expire_time = int(state['expire_time'])

            # If the cache is expired, set the old data to None so we force
            # a refresh.
            if self.is_expired():
                self.__old_domain_data = None
                os.unlink(self.__cache_file)
            else:
                self.__old_domain_data = state['domain_data']

        else:
            self.__old_domain_data = None
            self.__expire_time     = None

    def _save_state(self):
        """
        Saves the given polling state to disk.

        The stored 'expire_time' is an absolute epoch stamp; when missing or
        already expired it is renewed using the module-level
        CACHE_EXPIRE_SECS (not the constructor argument).
        """
        # First, ensure that the proper parent directory is created.
        cache_dir_path = os.path.dirname(self.__cache_file)
        if not os.path.exists(cache_dir_path):
            os.makedirs(cache_dir_path, 0o700)

        state = {}
        state['domain_data'] = self.__new_domain_data
        if self.__expire_time is None or self.is_expired():
            state['expire_time'] = int(time.time()) + CACHE_EXPIRE_SECS
        else:
            state['expire_time'] = self.__expire_time

        # Now attempt to open the file for writing.  We'll just overwrite
        # whatever's already there.  Also, let any exceptions bounce out.
        cache_file = open(self.__cache_file, "wb")
        pickle.dump(state, cache_file)
        cache_file.close()

    def _compare_domain_data(self):
        """
        Compares the old domain_data to the new domain_data and populates
        self.__added, self.__removed and self.__modified (dicts keyed by
        uuid), relative to the new domain_data.
        """
        self.__added    = {}
        self.__removed  = {}
        self.__modified = {}

        # First, figure out the modified and added uuids.
        if self.__new_domain_data:
            for (uuid, new_properties) in list(self.__new_domain_data.items()):
                if not self.__old_domain_data or \
                    uuid not in self.__old_domain_data:

                    self.__added[uuid] = self.__new_domain_data[uuid]
                else:
                    old_properties = self.__old_domain_data[uuid]
                    if old_properties != new_properties:
                        self.__modified[uuid] = self.__new_domain_data[uuid]

        # Now, figure out the removed uuids.
        if self.__old_domain_data:
            for uuid in list(self.__old_domain_data.keys()):
                if not self.__new_domain_data or \
                    uuid not in self.__new_domain_data:

                    self.__removed[uuid] = self.__old_domain_data[uuid]


###############################################################################
### beacon                                                                  ###
###############################################################################

def __virtual__():
    '''
    Only load this beacon when the libvirt python bindings are importable.
    '''
    if HAS_LIBVIRT:
        return __virtualname__
    return False


def validate(config):
    '''
    Validate the beacon configuration: it must be a dictionary.
    '''
    if isinstance(config, dict):
        return True, 'Configuration validated'
    return False, ('Configuration for virtpoller '
                   'beacon must be a dictionary.')


def beacon(config):
    '''
    Poll the hypervisor (via libvirt) for information about the currently
    running set of domains and report changes since the last poll.

    Returns a list with at most one item: ``{'plan': [event, ...]}`` where
    each event carries a timestamp, an event/target type and, for domain
    events, the polled guest properties.

    Example Config

    .. code-block:: yaml

        beacons:
          virtpoller:
            expire_time: 21600
            cache_file: '/var/cache/virt_state.cache'
            interval: 320
    '''

    ret = []

    if not libvirt:
        # log.trace is presumably added by salt's logging setup — confirm
        log.trace("no libvirt")
        return ret

    try:
        conn = libvirt.openReadOnly(None)
    except libvirt.libvirtError as lve:
        # An unreachable libvirtd is not fatal for the minion; include the
        # actual error (previously `lve` was caught but never logged).
        log.error("Warning: Could not retrieve virtualization information! "
                  "libvirtd service needs to be running. Error: %s", lve)
        conn = None

    if not conn:
        # No connection to hypervisor made
        return ret

    domains = conn.listAllDomains(0)

    state = {}
    for domain in domains:
        uuid = binascii.hexlify(domain.UUID())
        # SEE: http://libvirt.org/html/libvirt-libvirt.html#virDomainInfo
        # for more info.
        domain_info = domain.info()

        # Set the virtualization type.  We can tell if the domain is fully virt
        # by checking the domain's OSType() attribute.
        virt_type = VirtualizationType.PARA
        if domain.OSType().lower() == 'hvm':
            virt_type = VirtualizationType.FULLY

        # we need to filter out the small per/minute KB changes
        # that occur inside a vm.  To do this we divide by 1024 to
        # drop our precision down to megabytes with an int then
        # back up to KB
        memory = int(domain_info[2] / 1024) * 1024
        properties = {
            PropertyType.NAME   : domain.name(),
            PropertyType.UUID   : uuid,
            PropertyType.TYPE   : virt_type,
            PropertyType.MEMORY : str(memory), # current memory
            PropertyType.VCPUS  : domain_info[3],
            PropertyType.STATE  : VIRT_STATE_NAME_MAP[domain_info[0]] }

        state[uuid] = properties

    poller_state = PollerStateCache(state,
                                    cache_file = config.get('cache_file', CACHE_DATA_PATH),
                                    expire_time = config.get('expire_time', CACHE_EXPIRE_SECS))

    plan = []
    if poller_state.is_changed():
        if poller_state.is_expired():
            # Cache expired: request a full refresh before the deltas.
            plan.append({'time': int(time.time()),
                         'event_type': EventType.FULLREPORT,
                         'target_type': TargetType.DOMAIN})

        # Added and modified domains are both reported as EXISTS events,
        # removed ones as REMOVED.  The original (added, modified, removed)
        # ordering is preserved.
        for event_type, changes in ((EventType.EXISTS, poller_state.get_added()),
                                    (EventType.EXISTS, poller_state.get_modified()),
                                    (EventType.REMOVED, poller_state.get_removed())):
            for data in changes.values():
                plan.append({'time': int(time.time()),
                             'event_type': event_type,
                             'target_type': TargetType.DOMAIN,
                             'guest_properties': data})

    # Persist the new snapshot regardless of whether anything changed.
    poller_state.save()
    if plan:
        ret.append({'plan': plan})
    return ret
  0707010000009D000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001B00000000susemanager-sls/src/grains    0707010000009E000081B40000000000000000000000015EA152C400000000000000000000000000000000000000000000002700000000susemanager-sls/src/grains/__init__.py    0707010000009F000081B40000000000000000000000015EA152C400000F05000000000000000000000000000000000000002600000000susemanager-sls/src/grains/cpuinfo.py import logging
import salt.modules.cmdmod
import salt.utils
import os
import re
try:
    from salt.utils.path import which_bin as _which_bin
except ImportError:
    from salt.utils import which_bin as _which_bin

__salt__ = {
    'cmd.run_all': salt.modules.cmdmod.run_all,
}

log = logging.getLogger(__name__)


def _lscpu(feedback):
    '''
    Determine the CPU socket count from "lscpu -p" parseable output.

    :param feedback: list collecting human-readable failure reasons
    :return: {'cpusockets': <count>} on success, otherwise None
    '''
    lscpu = _which_bin(['lscpu'])
    if lscpu is None:
        return None

    try:
        log.debug("Trying lscpu to get CPU socket count")
        result = __salt__['cmd.run_all']('{0} -p'.format(lscpu), output_loglevel='quiet')
        if result['retcode'] == 0:
            # Column 3 of "lscpu -p" is the socket index; sockets are
            # numbered from 0, so the count is the highest index plus one.
            socket_ids = [int(line.split(',')[2])
                          for line in result['stdout'].strip().splitlines()
                          if not line.startswith('#')]
            if socket_ids and max(socket_ids) > -1:
                return {'cpusockets': max(socket_ids) + 1}
    except Exception as error:
        feedback.append("lscpu: {0}".format(str(error)))
        log.debug(str(error))


def _parse_cpuinfo(feedback):
    '''
    Determine the CPU socket count by parsing /proc/cpuinfo: each distinct
    "physical id" value corresponds to one socket.

    :param feedback: list collecting human-readable failure reasons
    :return: {'cpusockets': <count>} on success, otherwise None
    '''
    socket_ids = set()
    if os.access("/proc/cpuinfo", os.R_OK):
        try:
            log.debug("Trying /proc/cpuinfo to get CPU socket count")
            with open('/proc/cpuinfo') as handle:
                for line in handle.readlines():
                    if not line.strip().startswith('physical id'):
                        continue
                    fields = line.split(':')
                    if len(fields) >= 2 and len(fields[1]) >= 2:
                        socket_ids.add(fields[1].strip())
            if socket_ids:
                return {'cpusockets': len(socket_ids)}
        except Exception as error:
            log.debug(str(error))
            feedback.append("/proc/cpuinfo: {0}".format(str(error)))
        else:
            # Read cleanly but found no usable "physical id" lines.
            feedback.append('/proc/cpuinfo: format is not applicable')


def _dmidecode(feedback):
    '''
    Determine the CPU socket count from "dmidecode -t processor" output
    by counting "Processor Information" sections.

    :param feedback: list collecting human-readable failure reasons
    :return: {'cpusockets': <count>} on success, otherwise None
    '''
    dmidecode = _which_bin(['dmidecode'])
    if dmidecode is None:
        feedback.append("dmidecode: executable not found")
        return None

    try:
        log.debug("Trying dmidecode to get CPU socket count")
        result = __salt__['cmd.run_all']("{0} -t processor".format(dmidecode), output_loglevel='quiet')
        if result['retcode'] == 0:
            sockets = sum(1 for line in result['stdout'].strip().splitlines()
                          if 'Processor Information' in line)
            if sockets:
                return {'cpusockets': sockets}
    except Exception as error:
        log.debug(str(error))
        feedback.append("dmidecode: {0}".format(str(error)))


def cpusockets():
    """
    Return {'cpusockets': <count>}, or None when no detection method
    succeeded.

    Tries, in order: lscpu, /proc/cpuinfo, dmidecode.  Each failed method
    appends its reason to ``feedback``, which is logged on total failure.
    """
    feedback = list()
    grains = _lscpu(feedback) or _parse_cpuinfo(feedback) or _dmidecode(feedback)
    if not grains:
        # logging.Logger.warn is deprecated (removed in Python 3.13);
        # use warning() instead.
        log.warning("Could not determine CPU socket count: {0}".format(' '.join(feedback)))

    return grains


def total_num_cpus():
    """
    Return {'total_num_cpus': <count>} with the number of CPUs present in
    the system, counted from /sys/devices/system/cpu/cpu<N> entries.

    /proc/cpuinfo shows only active CPUs; on s390x the number of present
    CPUs can differ (see IBM redbook "Using z/VM for Test and Development
    Environments: A Roundup", chapter 3.5), hence the sysfs-based count.
    """
    cpu_entry = re.compile(r"^cpu[0-9]+$")
    sysdev = '/sys/devices/system/cpu/'
    entries = os.listdir(sysdev) if os.path.exists(sysdev) else []
    return {'total_num_cpus': sum(1 for name in entries if cpu_entry.match(name))}
   070701000000A0000081B40000000000000000000000015EA152C400000F89000000000000000000000000000000000000002B00000000susemanager-sls/src/grains/public_cloud.py    # -*- coding: utf-8 -*-
'''
Copyright (c) 2019 SUSE LLC

This software is licensed to you under the GNU General Public License,
version 2 (GPLv2). There is NO WARRANTY for this software, express or
implied, including the implied warranties of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
along with this software; if not, see
http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.

This grain module is only loaded in case of a public cloud instance.

Supported Instances: AWS EC2, Azure and Google Compute Engine instances

Returns a grain called "instance_id" containing the virtual instance ID
according to the Public Cloud provider. The data is gathered using the
internal API available from within the instance.

Author: Pablo Suárez Hernández <psuarezhernandez@suse.com>
Based on: https://docs.saltstack.com/en/latest/ref/grains/all/salt.grains.metadata.html
'''
from __future__ import absolute_import, print_function, unicode_literals

# Import python libs
import os
import socket
from multiprocessing.pool import ThreadPool
import logging

# Import salt libs
import salt.utils.http as http

# Internal metadata API information
INTERNAL_API_IP = '169.254.169.254'
HOST = 'http://{0}/'.format(INTERNAL_API_IP)

INSTANCE_ID = None

AMAZON_URL_PATH = 'latest/meta-data/'
AZURE_URL_PATH = 'metadata/instance/compute/'
AZURE_API_ARGS = '?api-version=2017-08-01&format=text'
GOOGLE_URL_PATH = 'computeMetadata/v1/instance/'

log = logging.getLogger(__name__)


def __virtual__():
    '''
    Load these grains only when running inside a supported public cloud
    instance (AWS EC2, Azure, GCE), detected through the link-local
    metadata API.

    Side effect: on success the module-level INSTANCE_ID is filled with
    the provider-specific instance id.
    '''
    global INSTANCE_ID
    # Quick TCP probe of the metadata IP; bail out fast when this is not
    # a cloud instance.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(0.1)
    result = sock.connect_ex((INTERNAL_API_IP, 80))
    if result != 0:
        return False

    def _do_api_request(data):
        # data is a (provider, url, headers) tuple.
        opts = {
            'http_connect_timeout': 0.1,
            'http_request_timeout': 0.1,
        }
        try:
            ret = {
                data[0]: http.query(data[1],
                                    status=True,
                                    header_dict=data[2],
                                    raise_error=False,
                                    opts=opts)
            }
        except Exception:
            # Best-effort probing: any failure just means "no data" for
            # this provider (was a bare "except:").
            ret = { data[0]: dict() }
        return ret

    api_check_dict = [
        ('amazon', os.path.join(HOST, AMAZON_URL_PATH), None),
        ('google', os.path.join(HOST, GOOGLE_URL_PATH), {"Metadata-Flavor": "Google"}),
        ('azure', os.path.join(HOST, AZURE_URL_PATH) + AZURE_API_ARGS, {"Metadata":"true"}),
    ]

    api_ret = {}
    results = []

    try:
        pool = ThreadPool(3)
        results = pool.map(_do_api_request, api_check_dict)
        pool.close()
        pool.join()
    except Exception as exc:
        import traceback
        log.error(traceback.format_exc())
        log.error("Exception while creating a ThreadPool for accessing metadata API: %s", exc)

    for i in results:
        api_ret.update(i)

    # Use .get() with empty defaults: when the ThreadPool failed, api_ret
    # may be missing the provider keys entirely (previously raised KeyError).
    amazon = api_ret.get('amazon', {})
    azure = api_ret.get('azure', {})
    google = api_ret.get('google', {})

    if amazon.get('status', 0) == 200 and "instance-id" in amazon.get('body', ''):
        INSTANCE_ID = http.query(os.path.join(HOST, AMAZON_URL_PATH, 'instance-id'), raise_error=False)['body']
        return True
    elif azure.get('status', 0) == 200 and "vmId" in azure.get('body', ''):
        INSTANCE_ID = http.query(os.path.join(HOST, AZURE_URL_PATH, 'vmId') + AZURE_API_ARGS, header_dict={"Metadata":"true"}, raise_error=False)['body']
        return True
    elif google.get('status', 0) == 200 and "id" in google.get('body', ''):
        INSTANCE_ID = http.query(os.path.join(HOST, GOOGLE_URL_PATH, 'id'), header_dict={"Metadata-Flavor": "Google"}, raise_error=False)['body']
        return True

    return False


def instance_id():
    '''
    Return the "instance_id" grain when the loader detected a public
    cloud instance, otherwise an empty dict.
    '''
    # INSTANCE_ID is only read here, so no `global` statement is needed.
    if not INSTANCE_ID:
        return {}
    return {'instance_id': INSTANCE_ID}

def is_payg_instance():
    '''
    Return {'is_payg_instance': True} when the public-cloud guest
    registration tool is present (marker for PAYG images), otherwise
    an empty dict.
    '''
    payg_marker = '/usr/sbin/registercloudguest'
    return {'is_payg_instance': True} if os.path.isfile(payg_marker) else {}
   070701000000A1000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001C00000000susemanager-sls/src/modules   070701000000A2000081B40000000000000000000000015EA152C400000000000000000000000000000000000000000000002800000000susemanager-sls/src/modules/__init__.py   070701000000A3000081B40000000000000000000000015EA152C400002C95000000000000000000000000000000000000002900000000susemanager-sls/src/modules/kiwi_info.py  import salt.exceptions
import logging
import os
import re
import hashlib
import pickle

log = logging.getLogger(__name__)

def parse_profile(chroot):
    '''
    Parse the Kiwi ``image/.profile`` file inside *chroot* into a dict.
    Only lines of the form NAME='value' are taken into account.
    '''
    ret = {}
    path = os.path.join(chroot, 'image', '.profile')
    if not __salt__['file.file_exists'](path):
        return ret
    line_re = re.compile(r"^(?P<name>.*?)='(?P<val>.*)'")
    for line in __salt__['cp.get_file_str'](path).splitlines():
        found = line_re.match(line)
        if found:
            ret[found.group('name')] = found.group('val')
    return ret

def parse_buildinfo(dest):
    '''
    Parse the INI-like ``kiwi.buildinfo`` file in *dest* into a nested
    dict: {section: {key: value}}.  Key=value pairs seen before the first
    [section] header land directly in the top-level dict.
    '''
    ret = {}
    path = os.path.join(dest, 'kiwi.buildinfo')
    if not __salt__['file.file_exists'](path):
        return ret

    section_re = re.compile(r"^\[(?P<name>.*)\]")
    keyval_re = re.compile(r"^(?P<name>.*?)=(?P<val>.*)")

    group = ret
    for line in __salt__['cp.get_file_str'](path).splitlines():
        section = section_re.match(line)
        if section:
            # Start a fresh sub-dict for the new section.
            group = {}
            ret[section.group('name')] = group

        keyval = keyval_re.match(line)
        if keyval:
            group[keyval.group('name')] = keyval.group('val')
    return ret

def guess_buildinfo(dest):
    '''
    Fallback for SLES11 Kiwi and for Kiwi NG builds that do not create the
    kiwi.buildinfo file: derive the image basename from the ``.packages``
    file name and mark the image as 'pxe' when both kernel and initrd
    artefacts are present in *dest*.
    '''
    ret = {'main': {}}

    basename_re = re.compile(r"^(?P<basename>.*)\.packages$")
    initrd_res = (re.compile(r"^initrd-netboot.*"),   # legacy Kiwi
                  re.compile(r".*\.initrd\..*"))      # Kiwi NG
    kernel_res = (re.compile(r".*\.kernel\..*"),      # legacy Kiwi
                  re.compile(r".*\.kernel$"))         # Kiwi NG
    have_kernel = False
    have_initrd = False

    for entry in __salt__['file.readdir'](dest):
        found = basename_re.match(entry)
        if found:
            ret['main']['image.basename'] = found.group('basename')
        if any(rx.match(entry) for rx in initrd_res):
            have_initrd = True
        if any(rx.match(entry) for rx in kernel_res):
            have_kernel = True

    if have_kernel and have_initrd:
        ret['main']['image.type'] = 'pxe'
    return ret

# Kiwi NG
def parse_kiwi_result(dest):
    '''
    Unpickle the Kiwi NG ``kiwi.result`` file in *dest* and extract image
    metadata (arch, basename, type, filesystem, initrd_system).

    Unpickling depends on the python-kiwi modules being importable, which
    is not under our control, so any failure yields an empty dict; callers
    must treat every key as optional.
    '''
    path = os.path.join(dest, 'kiwi.result')
    ret = {}
    if __salt__['file.file_exists'](path):
        try:
            # pickle depends on availability of python kiwi modules
            # which are not under our control so there is certain risk of failure
            # return empty dict in such case
            # the caller should handle all values as optional
            with open(path, 'rb') as f:
                result = pickle.load(f)
                ret['arch'] = result.xml_state.host_architecture
                ret['basename'] = result.xml_state.xml_data.name
                ret['type'] = result.xml_state.build_type.image
                ret['filesystem'] = result.xml_state.build_type.filesystem
                ret['initrd_system'] = result.xml_state.build_type.initrd_system
        except Exception:
            # Was a bare "except:"; narrowed so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            log.exception("Loading kiwi.result")
            # continue with empty dict
    return ret

def parse_packages(path):
    '''
    Parse a Kiwi ``.packages`` file into a list of dicts with keys
    name, epoch, version, release, arch, disturl and (optional) license.

    '(none)' values are normalized to '' and gpg-pubkey pseudo-packages
    (recognizable by their empty arch) are skipped.
    '''
    ret = []
    if not __salt__['file.file_exists'](path):
        return ret

    line_re = re.compile(r"^(?P<name>.*?)\|(?P<epoch>.*?)\|(?P<version>.*?)\|(?P<release>.*?)\|(?P<arch>.*?)\|(?P<disturl>.*?)(\|(?P<license>.*))?$")
    for line in __salt__['cp.get_file_str'](path).splitlines():
        found = line_re.match(line)
        if not found:
            continue
        pkg = found.groupdict()
        # translate '(none)' values to ''
        for key, value in list(pkg.items()):
            if value == '(none)':
                pkg[key] = ''
        # if arch is '' and name begins gpg-pubkey then skip the package
        if pkg['arch'] == '' and pkg['name'].startswith('gpg-pubkey'):
            continue
        ret.append(pkg)
    return ret

def get_md5(path):
    '''
    Return {'hash': <md5>, 'size': <bytes>} for *path*, or an empty dict
    when the file does not exist.
    '''
    if not __salt__['file.file_exists'](path):
        return {}
    return {
        'hash': __salt__['file.get_hash'](path, form='md5'),
        'size': __salt__['file.stats'](path).get('size'),
    }

def parse_kiwi_md5(path, compressed = False):
    '''
    Parse a Kiwi ``.md5`` file.

    Returns {'hash', 'size'} and, when *compressed* is True, also
    'compressed_size'; sizes are stored as blockcount * blocksize.
    Returns an empty dict when the file is missing or unparseable.
    '''
    res = {}

    if not __salt__['file.file_exists'](path):
        return res

    md5_str = __salt__['cp.get_file_str'](path)
    if md5_str is None:
        return res

    if compressed:
        pattern = re.compile(r"^(?P<md5>[0-9a-f]+)\s+(?P<size1>[0-9]+)\s+(?P<size2>[0-9]+)\s+(?P<csize1>[0-9]+)\s+(?P<csize2>[0-9]+)\s*$")
    else:
        pattern = re.compile(r"^(?P<md5>[0-9a-f]+)\s+(?P<size1>[0-9]+)\s+(?P<size2>[0-9]+)\s*$")

    match = pattern.match(md5_str)
    if match:
        res['hash'] = match.group('md5')
        res['size'] = int(match.group('size1')) * int(match.group('size2'))
        if compressed:
            res['compressed_size'] = int(match.group('csize1')) * int(match.group('csize2'))
    return res

# Known image file suffixes mapped to their compression type.  Order
# matters: callers probe suffixes first to last, so the more specific
# ones (e.g. '.install.iso') must precede the generic ones and the
# empty-suffix catch-all must stay last.
_compression_types = [
    { 'suffix': '.gz', 'compression': 'gzip' },
    { 'suffix': '.bz', 'compression': 'bzip' },
    { 'suffix': '.xz', 'compression': 'xz' },
    { 'suffix': '.install.iso',    'compression': None },
    { 'suffix': '.iso',            'compression': None },
    { 'suffix': '.raw',            'compression': None },
    { 'suffix': '',    'compression': None }
    ]

def image_details(dest, bundle_dest = None):
    '''
    Collect metadata about the Kiwi image found in *dest*.

    Returns {'image': {...}} — plus a 'bundle' entry when *bundle_dest*
    is given — or None when the image basename cannot be parsed.
    '''
    res = {}
    buildinfo = parse_buildinfo(dest) or guess_buildinfo(dest)
    kiwiresult = parse_kiwi_result(dest)

    basename = buildinfo.get('main', {}).get('image.basename', '')
    image_type = kiwiresult.get('type') or buildinfo.get('main', {}).get('image.type', 'unknown')
    fstype = kiwiresult.get('filesystem')

    # The basename has the form <name>.<arch>-<version>.
    name_re = re.compile(r"^(?P<name>.*)\.(?P<arch>.*)-(?P<version>.*)$")
    parsed = name_re.match(basename)
    if not parsed:
        return None
    name = parsed.group('name')
    arch = parsed.group('arch')
    version = parsed.group('version')

    # Probe the known suffixes (most specific first) to locate the image
    # file and derive its compression type.
    filename = None
    filepath = None
    compression = None
    for ctype in _compression_types:
        candidate = os.path.join(dest, basename + ctype['suffix'])
        if __salt__['file.file_exists'](candidate):
            compression = ctype['compression']
            filename = basename + ctype['suffix']
            filepath = candidate
            break

    res['image'] = {
        'basename': basename,
        'name': name,
        'arch': arch,
        'type': image_type,
        'version': version,
        'compression': compression,
        'filename': filename,
        'filepath': filepath,
        'fstype': fstype
    }

    # Merge hash/size information from the .md5 companion file.
    res['image'].update(parse_kiwi_md5(os.path.join(dest, basename + '.md5'), compression is not None))

    if bundle_dest is not None:
        res['bundle'] = inspect_bundle(bundle_dest, basename)

    return res

def inspect_image(dest, bundle_dest = None):
    '''
    Full inspection of the Kiwi image in *dest*: image details, detected
    filesystem type, package list and — for PXE images — the boot image.

    Returns None when no image could be identified.
    '''
    res = image_details(dest, bundle_dest)
    if not res:
        return None

    basename = res['image']['basename']

    # A "<basename>.<fstype>" file (or symlink) reveals the filesystem type.
    for fstype in ['ext2', 'ext3', 'ext4', 'btrfs', 'xfs']:
        fs_path = os.path.join(dest, basename + '.' + fstype)
        if __salt__['file.file_exists'](fs_path) or __salt__['file.is_link'](fs_path):
            res['image']['fstype'] = fstype
            break

    res['packages'] = parse_packages(os.path.join(dest, basename + '.packages'))

    if res['image']['type'] == 'pxe':
        res['boot_image'] = inspect_boot_image(dest)

    return res


def inspect_boot_image(dest):
    '''
    Collect kernel and initrd details for a PXE boot image found in `dest`.

    Supports both the legacy KIWI layout (detected via the kernel ``.md5``
    file) and the KIWI NG layout (detected via the ``.kernel`` file).
    Returns a dict with name, arch, basename, initrd and kernel info, or
    None when no boot image files are present.
    '''
    legacy_re = re.compile(r"^(?P<name>.*)\.(?P<arch>.*)-(?P<version>.*)\.kernel\.(?P<kernelversion>.*)\.md5$")
    kiwi_ng_re = re.compile(r"^(?P<name>[^-]*)\.(?P<arch>[^-]*)-(?P<version>[^-]*)-(?P<kernelversion>.*)\.kernel$")

    res = None
    basename = None
    # For each directory entry try the legacy pattern first, then KIWI NG;
    # stop at the first entry matching either one.
    for entry in __salt__['file.readdir'](dest):
        for regex, is_kiwi_ng in ((legacy_re, False), (kiwi_ng_re, True)):
            match = regex.match(entry)
            if match:
                basename = '{0}.{1}-{2}'.format(
                    match.group('name'), match.group('arch'), match.group('version'))
                res = {
                    'name': match.group('name'),
                    'arch': match.group('arch'),
                    'basename': basename,
                    'initrd': {
                        'version': match.group('version')
                    },
                    'kernel': {
                        'version': match.group('kernelversion')
                    },
                    'kiwi_ng': is_kiwi_ng
                }
                break
        if res is not None:
            break

    if res is None:
        return None

    # locate the (possibly compressed) initrd and record its checksum
    for c in _compression_types:
        if res['kiwi_ng']:
            initrd_name = basename + '.initrd' + c['suffix']
        else:
            initrd_name = basename + c['suffix']
        if __salt__['file.file_exists'](os.path.join(dest, initrd_name)):
            res['initrd']['filename'] = initrd_name
            if res['kiwi_ng']:
                res['initrd'].update(get_md5(os.path.join(dest, initrd_name)))
            else:
                # legacy KIWI ships a separate .md5 file for the image
                res['initrd'].update(parse_kiwi_md5(os.path.join(dest, basename + '.md5')))
            break

    # locate the kernel file; naming differs between the two layouts
    if res['kiwi_ng']:
        kernel_name = '{0}-{1}.kernel'.format(basename, res['kernel']['version'])
        kernel_path = os.path.join(dest, kernel_name)
        if __salt__['file.file_exists'](kernel_path):
            res['kernel']['filename'] = kernel_name
            res['kernel'].update(get_md5(kernel_path))
    else:
        kernel_name = '{0}.kernel.{1}'.format(basename, res['kernel']['version'])
        kernel_path = os.path.join(dest, kernel_name)
        if __salt__['file.file_exists'](kernel_path):
            res['kernel']['filename'] = kernel_name
            res['kernel'].update(parse_kiwi_md5(kernel_path + '.md5'))

    return res

def inspect_bundle(dest, basename):
    '''
    Look up the bundle archive belonging to `basename` in `dest` via its
    ``.sha256`` checksum file.

    Returns a dict with basename, id, suffix, hash, filename and filepath,
    or None when no matching ``.sha256`` file exists.
    '''
    sha256_re = re.compile(
        r"^(?P<basename>" + re.escape(basename) + r")-(?P<id>[^.]*)\.(?P<suffix>.*)\.sha256$")

    res = None
    sha256_file = None
    for entry in __salt__['file.readdir'](dest):
        found = sha256_re.match(entry)
        if found:
            res = found.groupdict()
            sha256_file = entry
            break
    if res is None:
        return None

    content = __salt__['cp.get_file_str'](os.path.join(dest, sha256_file))

    # usual checksum format: '<hash>  <filename>'
    with_name = re.match(r"^(?P<hash>[0-9a-f]+)\s+(?P<filename>.*)\s*$", content)
    if with_name:
        fields = with_name.groupdict()
        fields['hash'] = 'sha256:{0}'.format(fields['hash'])
        res.update(fields)
        res['filepath'] = os.path.join(dest, res['filename'])
    else:
        # only hash without file name
        bare = re.match(r"^(?P<hash>[0-9a-f]+)$", content)
        if bare:
            res['hash'] = 'sha256:{0}'.format(bare.groupdict()['hash'])
            # derive the payload name by stripping the '.sha256' suffix
            res['filename'] = sha256_file[0:-len('.sha256')]
            res['filepath'] = os.path.join(dest, res['filename'])

    return res
   070701000000A4000081B40000000000000000000000015EA152C400000FEB000000000000000000000000000000000000002B00000000susemanager-sls/src/modules/kiwi_source.py    import salt.exceptions
import logging
import os
from tempfile import mkdtemp
try:
    from urllib.parse import urlparse
except ImportError:
     from urlparse import urlparse

log = logging.getLogger(__name__)

# valid prefixes taken from Docker-CE to be compatible
valid_git_prefixes = ['http://', 'https://', 'git://', 'github.com/', 'git@']
valid_url_prefixes = ['http://', 'https://']
valid_url_suffixes = ['.tar.gz', '.tar.xz', '.tar.bz2', '.tgz', '.tar']

def _isLocal(source):
  # True when `source` is an existing local directory (per Salt's file module).
  return __salt__['file.directory_exists'](source)

def _isGit(source):
  '''
  True when `source` starts with one of the recognized git URL prefixes.
  '''
  return any(source.startswith(prefix) for prefix in valid_git_prefixes)

def _isTarball(source):
  '''
  True when `source` is an http(s) URL pointing at a known tarball suffix.
  '''
  if not any(source.startswith(prefix) for prefix in valid_url_prefixes):
    return False
  return any(source.endswith(suffix) for suffix in valid_url_suffixes)

def _prepareDestDir(dest):
  '''
  Fail if the target directory already exists before sources are prepared.
  '''
  if os.path.isdir(dest):
    raise salt.exceptions.SaltException(
        'Working directory "{0}" exists before sources are prepared'.format(dest))

def _prepareLocal(source, dest):
  '''
  Make link from `source` to `dest`
  '''
  # Local sources are not copied, only symlinked into the build root.
  log.debug('Source is local directory')
  _prepareDestDir(dest)
  __salt__['file.symlink'](source, dest)
  return dest

def _prepareHTTP(source, dest):
  '''
  Download tarball and extract to the directory
  '''
  log.debug('Source is HTTP')
  _prepareDestDir(dest)

  def _raise_on_failure(state_result):
    # abort on the first failed state outcome
    for _, outcome in list(state_result.items()):
      if not outcome['result']:
        raise salt.exceptions.SaltException(outcome['comment'])

  filename = os.path.join(dest, source.split("/")[-1])
  _raise_on_failure(__salt__['state.single'](
      'file.managed', filename, source=source, makedirs=True, skip_verify=True))
  _raise_on_failure(__salt__['state.single'](
      'archive.extracted', name=dest, source=filename, skip_verify=True, overwrite=True))
  return dest

def _prepareGit(source, dest, root):
  '''
  Check out a git source into `root` and link the requested (sub)tree to `dest`.

  source -- git URI, i.e. git@github.com/repo/#rev:sub
  dest   -- path the prepared source is linked to
  root   -- build root receiving the temporary checkout

  Raises SaltException when a requested subdirectory is missing.
  '''
  _prepareDestDir(dest)

  # checkout git into temporary directory in our build root
  # this is needed if we are interested only in git subtree
  tmpdir = __salt__['temp.dir'](parent=root)

  # parse git uri - i.e. git@github.com/repo/#rev:sub
  # compatible with docker as per https://docs.docker.com/engine/reference/commandline/build/#git-repositories
  #
  # str.partition instead of try/except around split: the original bare
  # 'except:' clauses would also have swallowed unrelated errors.
  url, _, fragment = source.partition('#')
  rev, _, subdir = fragment.partition(':')
  subdir = subdir or None

  # omitted rev means default 'master' branch revision
  if rev == '':
    rev = 'master'

  log.debug('GIT URL: {0}, Revision: {1}, subdir: {2}'.format(url, rev, subdir))
  __salt__['git.init'](tmpdir)
  __salt__['git.remote_set'](tmpdir, url)
  __salt__['git.fetch'](tmpdir)
  __salt__['git.checkout'](tmpdir, rev=rev)

  if subdir:
    if _isLocal(os.path.join(tmpdir, subdir)):
      __salt__['file.symlink'](os.path.join(tmpdir, subdir), dest)
    else:
      raise salt.exceptions.SaltException('Directory is not present in checked out source: {}'.format(subdir))
  else:
    __salt__['file.symlink'](tmpdir, dest)
  return dest

def prepare_source(source, root):
  '''
  Prepare source directory based on different source types.

  source -- string with either local directory path, remote http(s) archive or git repository
  root   -- local directory where to store processed source files

  For git repository following format is understood:
    [http[s]://|git://][user@]hostname/repository[#revision[:subdirectory]]
  '''
  dest = os.path.join(root, 'source')
  log.debug('Preparing build source for {0} to {1}'.format(source, dest))
  # dispatch in the same priority order: local dir, tarball URL, git URI
  if _isLocal(source):
    return _prepareLocal(source, dest)
  if _isTarball(source):
    return _prepareHTTP(source, dest)
  if _isGit(source):
    return _prepareGit(source, dest, root)
  raise salt.exceptions.SaltException('Unknown source format "{0}"'.format(source))
 070701000000A5000081B40000000000000000000000015EA152C40000041F000000000000000000000000000000000000003000000000susemanager-sls/src/modules/mainframesysinfo.py   # -*- coding: utf-8 -*-
'''
s390 utility for Suse Manager

'''
from __future__ import absolute_import

import logging
import salt.utils
import salt.modules.cmdmod
from salt.exceptions import CommandExecutionError
import os

__salt__ = {
    'cmd.run_all': salt.modules.cmdmod.run_all,
}

log = logging.getLogger(__name__)


def __virtual__():
    '''
    Only works if /usr/bin/read_values is accessible
    '''
    return os.access('/usr/bin/read_values', os.X_OK) or \
        os.access('/proc/sysinfo', os.R_OK)


def read_values():
    '''
    Executes /usr/bin/read_values or if not available
    falls back to 'cat /proc/sysinfo'

    CLI Example:

    .. code-block:: bash

        salt '*' mainframesysinfo.read_values
    '''
    # prefer the dedicated helper binary when it is executable
    use_helper = os.access('/usr/bin/read_values', os.X_OK)
    cmd = '/usr/bin/read_values -s' if use_helper else 'cat /proc/sysinfo'

    result = __salt__['cmd.run_all'](cmd, output_loglevel='quiet')
    if result['retcode'] != 0:
        raise CommandExecutionError(result['stderr'])
    return result['stdout']
'''
SUSE Manager Action Chains module for Salt

'''
from __future__ import absolute_import

import logging
import os
import sys
import salt.config
import salt.syspaths
import yaml

# Prevent issues due 'salt.utils.fopen' deprecation
try:
    from salt.utils import fopen
except:
    from salt.utils.files import fopen

from salt.exceptions import CommandExecutionError

log = logging.getLogger(__name__)

__virtualname__ = 'mgractionchains'

SALT_ACTIONCHAIN_BASE = 'actionchains'


def __virtual__():
    '''
    This module is always enabled while 'state.sls' is available.
    '''
    if 'state.sls' in __salt__:
        return __virtualname__
    return (False, 'state.sls is not available')

def _calculate_sls(actionchain_id, machine_id, chunk):
    '''
    Build the SLS reference for one Action Chain chunk, e.g.
    'actionchains.actionchain_<id>_<machine_id>_<chunk>'.
    '''
    suffix = 'actionchain_{0}_{1}_{2}'.format(actionchain_id, machine_id, chunk)
    return '{0}.{1}'.format(SALT_ACTIONCHAIN_BASE, suffix)

def _get_ac_storage_filenamepath():
    '''
    Calculate the filepath to the '_mgractionchains.conf' which is placed
    by default in /etc/salt/minion.d/
    '''
    # resolution order: explicit conf_dir, directory of conf_file, compiled-in default
    config_dir = __opts__.get('conf_dir')
    if config_dir is None and 'conf_file' in __opts__:
        config_dir = os.path.dirname(__opts__['conf_file'])
    if config_dir is None:
        config_dir = salt.syspaths.CONFIG_DIR

    default_include = __opts__.get(
        'default_include', salt.config.DEFAULT_MINION_OPTS['default_include'])
    minion_d_dir = os.path.join(config_dir, os.path.dirname(default_include))
    return os.path.join(minion_d_dir, '_mgractionchains.conf')

def _read_next_ac_chunk(clear=True):
    '''
    Read and remove the content of '_mgractionchains.conf' file. Return the parsed YAML.

    clear
        When True (default), delete the storage file after a successful read.

    Returns None when no storage file exists.
    Raises CommandExecutionError when the file cannot be read or parsed.
    '''
    f_storage_filename = _get_ac_storage_filenamepath()
    if not os.path.isfile(f_storage_filename):
        return None
    ret = None
    try:
        with fopen(f_storage_filename, "r") as f_storage:
            # safe_load: the file only ever contains plain data written by
            # _persist_next_ac_chunk; yaml.load without a Loader is unsafe
            # (arbitrary object construction) and deprecated.
            ret = yaml.safe_load(f_storage.read())
        if clear:
            os.remove(f_storage_filename)
        return ret
    except (IOError, yaml.YAMLError) as exc:
        # yaml.YAMLError also covers parser errors, which the original
        # yaml.scanner.ScannerError-only clause let escape
        err_str = "Error processing YAML from '{0}': {1}".format(f_storage_filename, exc)
        log.error(err_str)
        raise CommandExecutionError(err_str)

def _add_boot_time(next_chunk, prefix):
    '''
    Add the current boot time to the next_chunk dict
    '''
    # stored under '<prefix>_boot_time' as an ISO timestamp
    since = __salt__["status.uptime"]()["since_iso"]
    next_chunk["{0}_boot_time".format(prefix)] = since

def _persist_next_ac_chunk(next_chunk):
    '''
    Persist next_chunk to execute as YAML in '_mgractionchains.conf'

    Raises CommandExecutionError when the directory/file cannot be written
    or the data cannot be serialized.
    '''
    _add_boot_time(next_chunk, "persist")
    f_storage_filename = _get_ac_storage_filenamepath()
    try:
        f_storage_dir = os.path.dirname(f_storage_filename)  # stray ';' removed
        if not os.path.exists(f_storage_dir):
            os.makedirs(f_storage_dir)
        with fopen(f_storage_filename, "w") as f_storage:
            f_storage.write(yaml.dump(next_chunk))
    except (IOError, OSError, yaml.YAMLError) as exc:
        # yaml.YAMLError: dumping raises representer errors, never scanner
        # errors, so the original yaml.scanner.ScannerError could never match.
        # OSError: os.makedirs may fail (permissions / creation race).
        err_str = "Error writing YAML from '{0}': {1}".format(f_storage_filename, exc)
        log.error(err_str)
        raise CommandExecutionError(err_str)

def start(actionchain_id):
    '''
    Start the execution of the given SUSE Manager Action Chain

    actionchain_id
        The SUSE Manager Actionchain ID to execute on this minion.

    CLI Example:

    .. code-block:: bash

        salt '*' mgractionchains.start 123
    '''
    storage_path = _get_ac_storage_filenamepath()
    # refuse to start while another chain's state file is still present
    if os.path.isfile(storage_path):
        msg = "Action Chain '{0}' cannot be started. There is already another " \
              "Action Chain being executed. Please check file '{1}'".format(
                actionchain_id, storage_path)
        log.error(msg)
        raise CommandExecutionError(msg)
    target_sls = _calculate_sls(actionchain_id, __grains__['machine_id'], 1)
    log.debug("Starting execution of SUSE Manager Action Chains ID "
              "'{0}' -> Target SLS: {1}".format(actionchain_id, target_sls))
    result = __salt__['state.sls'](target_sls, queue=True)
    # state.sls returns a list (of error strings) on failure
    if isinstance(result, list):
        raise CommandExecutionError(result)
    return result

def next(actionchain_id, chunk, next_action_id=None, ssh_extra_filerefs=None):
    '''
    Persist the next Action Chain chunk to be executed by the 'resume' method.

    next_chunk
        The next target SLS to be executed.

    CLI Example:

    .. code-block:: bash

        salt '*' mgractionchains.next actionchains.actionchain_123_machineid_2
    '''
    # NOTE: shadows the builtin 'next' on purpose - it is this module's public API name
    state = {
        'next_chunk': _calculate_sls(actionchain_id, __grains__['machine_id'], chunk)
    }
    for key, value in (('next_action_id', next_action_id),
                       ('ssh_extra_filerefs', ssh_extra_filerefs)):
        if value:
            state[key] = value
    _persist_next_ac_chunk(state)

def get_pending_resume():
    '''
    Get information about any pending action chain chunk execution.
    '''
    # read without clearing the storage file
    pending = _read_next_ac_chunk(False)
    if not pending:
        return {}
    _add_boot_time(pending, "current")
    return pending



def resume():
    '''
    Continue the execution of a SUSE Manager Action Chain.
    This will trigger the execution of the next chunk SLS file stored on '_mgractionchains.conf'

    This method is called by the Salt Reactor as a response to the 'minion/start/event'.

    Returns {} when there is nothing to resume; raises CommandExecutionError
    on a malformed storage file.
    '''
    next_chunk = _read_next_ac_chunk()
    if not next_chunk:
        return {}
    # isinstance instead of exact type() comparison (idiomatic, subclass-safe)
    if not isinstance(next_chunk, dict):
        err_str = "Not able to resume Action Chain execution! Malformed " \
                  "'_mgractionchains.conf' found: {0}".format(next_chunk)
        log.error(err_str)
        raise CommandExecutionError(err_str)
    target_sls = next_chunk.get('next_chunk')
    log.debug("Resuming execution of SUSE Manager Action Chain -> Target SLS: "
              "{0}".format(target_sls))
    return __salt__['state.sls'](target_sls, queue=True)

def clean():
    '''
    Clean execution of an Action Chain by removing '_mgractionchains.conf'.
    '''
    # reading with the default clear=True deletes the storage file as a side effect
    _read_next_ac_chunk()
    return {"success": True}


070701000000A7000081B40000000000000000000000015EA152C400001315000000000000000000000000000000000000002800000000susemanager-sls/src/modules/sumautil.py   # -*- coding: utf-8 -*-
'''
Utility module for Suse Manager

'''
from __future__ import absolute_import

import logging
import socket
import os
import re
import time
import salt.utils
from salt.exceptions import CommandExecutionError

# 'salt.modules.cmdmod' must be imported explicitly: the module only imports
# 'salt.utils' above, which does not make the 'salt.modules' subpackage
# reliably available when this file is used standalone.
import salt.modules.cmdmod

# minimal __salt__ mapping for standalone use; the Salt loader replaces it
__salt__ = {
    'cmd.run_all': salt.modules.cmdmod.run_all,
}

log = logging.getLogger(__name__)

__virtualname__ = 'sumautil'

SYSFS_NET_PATH = '/sys/class/net'


def __virtual__():
    '''
    Only run on Linux systems
    '''
    if __grains__['kernel'] == 'Linux':
        return __virtualname__
    return False


def cat(path):
    '''
    Cat the specified file.

    CLI Example:

    .. code-block:: bash

        salt '*' sumautil.cat /tmp/file
    '''
    # NOTE(review): `path` is interpolated into a shell command line;
    # callers should only pass trusted paths.
    result = __salt__['cmd.run_all']('cat %s' % path, output_loglevel='quiet')

    if result['retcode'] == 0:
        return {'retcode': 0, 'stdout': result['stdout']}
    return {'retcode': 1, 'stderr': result['stderr']}


def primary_ips():
    '''
    Get the source IPs that the minion uses to connect to the master.
    Returns the IPv4 and IPv6 address (if available).

    CLI Example:

    .. code-block:: bash

        salt '*' sumautil.primary_ip
    '''
    def get_master_ip(family, host):
        # first resolved address for the given address family
        return socket.getaddrinfo(host, 0, family)[0][-1][0]

    master = __opts__.get('master', '')
    log.debug('Using master: {0}'.format(str(master)))

    ret = dict()
    for sock_family, sock_descr in list({socket.AF_INET: 'IPv4', socket.AF_INET6: 'IPv6'}.items()):
        try:
            route = __salt__['network.get_route'](get_master_ip(sock_family, master))
            ret[sock_descr] = route
            # BUG FIX: the original logged ret['{0} source'...], a key that was
            # never stored - the resulting KeyError was swallowed by the
            # except below and mislogged as "<family> is not available?"
            log.debug("network.get_route({0}): ".format(route))
        except Exception as err:
            log.debug('{0} is not available? {1}'.format(sock_descr, err))

    return ret


def get_net_module(iface):
    '''
    Returns the kernel module used for the give interface
    or None if the module could not be determined of if the
    interface name is wrong.
    Uses '/sys/class/net' to find out the module.

    CLI Example:

    .. code-block:: bash

        salt '*' sumautil.get_net_module eth0
    '''
    driver_link = os.path.join(SYSFS_NET_PATH, iface, 'device/driver')
    if not os.path.exists(driver_link):
        return None
    # the driver symlink's basename is the module name
    module_name = os.path.split(os.readlink(driver_link))[-1]
    return module_name or None


def get_net_modules():
    '''
    Returns a dictionary of all network interfaces and their
    corresponding kernel module (if it could be determined).

    CLI Example:

    .. code-block:: bash

        salt '*' sumautil.get_net_modules
    '''
    drivers = dict()
    for devdir in os.listdir(SYSFS_NET_PATH):
        try:
            drivers[devdir] = get_net_module(devdir)
        except OSError as err:
            # BUG FIX: 'except OSError as devdir' shadowed the loop variable,
            # so the log line printed the exception instead of the interface.
            # Also: log.warn is a deprecated alias of log.warning.
            log.warning("An error occurred getting net driver for {0}".format(devdir), exc_info=True)

    return drivers or None

def get_kernel_live_version():
    '''
    Returns the patch version of live patching if it is active,
    otherwise None

    CLI Example:

    .. code-block:: bash

        salt '*' sumautil.get_kernel_live_version
    '''
    patch = _klp()
    if not patch:
        log.debug("No kernel live patch is active")
    return patch

def _klp():
    '''
    klp to identify the current kernel live patch

    Returns {'mgr_kernel_live_version': <patchname>} when an active patch
    is found, otherwise None (implicit).
    '''
    # get 'kgr' for versions prior to SLE 15
    try:
        from salt.utils.path import which_bin as _which_bin
    except ImportError:
        # bare 'except:' narrowed: only the import fallback should be handled
        from salt.utils import which_bin as _which_bin

    klp = _which_bin(['klp', 'kgr'])
    patchname = None
    if klp is not None:
        try:
            # loop until patching is finished (max ~10s)
            for i in range(10):
                stat = __salt__['cmd.run_all']('{0} status'.format(klp), output_loglevel='quiet')
                log.debug("klp status: {0}".format(stat['stdout']))
                if stat['stdout'].strip().splitlines()[0] == 'ready':
                    break
                time.sleep(1)
            re_active = re.compile(r"^\s+active:\s*(\d+)$")
            ret = __salt__['cmd.run_all']('{0} -v patches'.format(klp), output_loglevel='quiet')
            log.debug("klp patches: {0}".format(ret['stdout']))
            if ret['retcode'] == 0:
                for line in ret['stdout'].strip().splitlines():
                    if line.startswith('#'):
                        continue

                    # a patch-name line precedes its 'active: N' line, so we
                    # remember the last name seen and report it when active
                    match_active = re_active.match(line)
                    if match_active and int(match_active.group(1)) > 0:
                        return {'mgr_kernel_live_version': patchname }
                    elif line.startswith('kgraft') or line.startswith('livepatch'):
                        # kgr patches have prefix 'kgraft', whereas klp patches start with 'livepatch'
                        patchname = line.strip()

        except Exception as error:
            # best-effort probe: log and fall through to returning None
            log.error("klp: {0}".format(str(error)))
   070701000000A8000081B40000000000000000000000015EA152C400000B37000000000000000000000000000000000000002600000000susemanager-sls/src/modules/udevdb.py # -*- coding: utf-8 -*-
'''
Export udev database

'''
from __future__ import absolute_import

import logging
import salt.utils
import salt.modules.cmdmod
from salt.exceptions import CommandExecutionError
try:
    from salt.utils.path import which_bin as _which_bin
except ImportError:
    from salt.utils import which_bin as _which_bin

__salt__ = {
    'cmd.run_all': salt.modules.cmdmod.run_all,
}

log = logging.getLogger(__name__)


def __virtual__():
    '''
    Only work when udevadm is installed.
    '''
    found = _which_bin(['udevadm'])
    return found is not None


def exportdb():
    '''
    Extract all info delivered by udevadm

    CLI Example:

    .. code-block:: bash

        salt '*' udev.info /dev/sda
        salt '*' udev.info /sys/class/net/eth0
    '''

    cmd = 'udevadm info --export-db'
    udev_result = __salt__['cmd.run_all'](cmd, output_loglevel='quiet')

    if udev_result['retcode'] != 0:
        raise CommandExecutionError(udev_result['stderr'])

    # Records in the export are separated by blank lines; each record line
    # has the form '<query>: <data>' (e.g. 'P: /devices/...', 'E: KEY=val').
    devices = []
    dev = {}
    for line in (line.strip() for line in udev_result['stdout'].splitlines()):
        if line:
            line = line.split(':', 1)
            if len(line) != 2:
                # not a 'query: data' line - skip it
                continue
            query, data = line
            if query == 'E':
                # 'E' lines are environment KEY=value pairs -> nested dict
                if query not in dev:
                    dev[query] = {}
                key, val = data.strip().split('=', 1)

                # coerce numeric-looking values to int/float
                try:
                    val = int(val)
                except ValueError:
                    try:
                        val = float(val)
                    except ValueError:
                        pass  # Quiet, this is not a number.

                dev[query][key] = val
            else:
                # all other query keys accumulate their values in a list
                if query not in dev:
                    dev[query] = []
                dev[query].append(data.strip())
        else:
            # blank line terminates the current device record
            if dev:
                normalize(dev)
                add_scsi_info(dev)
                devices.append(dev)
                dev = {}
    # flush the last record (the export may not end with a blank line)
    if dev:
        normalize(dev)
        add_scsi_info(dev)
        devices.append(dev)

    return devices


def normalize(dev):
    '''
    Collapse single-element list values into the element itself, in place.

    :param dev: device record dict to normalize
    :return: the same dict, normalized
    '''
    for key in list(dev):
        value = dev[key]
        if isinstance(value, list) and len(value) == 1:
            dev[key] = value[0]

    return dev


def add_scsi_info(dev):
    '''
    Add SCSI info from sysfs

    For records whose environment marks them as scsi_device, read the
    device type from /sys/<P>/type into dev['X-Mgr']['SCSI_SYS_TYPE'].
    '''
    env = dev.get('E') or {}
    if env.get('SUBSYSTEM') == 'scsi' and env.get('DEVTYPE') == 'scsi_device':
        scsi_type = __salt__['cmd.run_all'](
            'cat /sys/{0}/type'.format(dev['P']), output_loglevel='quiet')

        if scsi_type['retcode'] != 0:
            raise CommandExecutionError(scsi_type['stderr'])

        dev['X-Mgr'] = {'SCSI_SYS_TYPE': scsi_type['stdout']}
 070701000000A9000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001B00000000susemanager-sls/src/states    070701000000AA000081B40000000000000000000000015EA152C400000AF1000000000000000000000000000000000000002600000000susemanager-sls/src/states/product.py '''
Handles installation of SUSE products using zypper

Only supported with :mod:`zypper <salt.modules.zypper>`
'''

import logging

from salt.utils.versions import version_cmp
from salt.exceptions import CommandExecutionError

log = logging.getLogger(__name__)

__virtualname__ = 'product'

def __virtual__():
    '''
    Only work on SUSE platforms with zypper
    '''
    if __grains__.get('os_family', '') != 'Suse':
        return (False, "Module product: non SUSE OS not supported")

    # Not all versions of SUSE use zypper, check that it is available
    try:
        zypper_version = __salt__['pkg.info_installed']('zypper')['zypper']['version']
    except CommandExecutionError:
        return (False, "Module product: zypper package manager not found")

    # Minimum version that supports 'zypper search --provides'
    if version_cmp(zypper_version, '1.8.13') < 0:
        return (False, "Module product: zypper 1.8.13 or greater required")
    return __virtualname__


def _get_missing_products(refresh):
    '''
    Return the subscribed products that are neither installed nor provided
    by an already installed package, or None when the initial product
    search yields no results.

    refresh -- passed through to pkg.search to force a repository refresh.
    '''
    # Search for not installed products
    products = []
    try:
        products = __salt__['pkg.search'](
            'product()',
            refresh=refresh,
            match='exact',
            provides=True,
            not_installed_only=True
        ).keys()

        log.debug("The following products are not yet installed: %s", ', '.join(products))

    except CommandExecutionError:
        # No search results
        return None

    # Exclude products that are already provided by another to prevent conflicts
    to_install = []
    for pkg in products:
        try:
            # pkg.search raises CommandExecutionError when nothing matches,
            # so a *successful* call here means some package already
            # provides this product and it must be skipped.
            res = __salt__['pkg.search'](
                pkg,
                match='exact',
                provides=True
            ).keys()

            log.debug("The product '%s' is already provided by '%s'. Skipping.", pkg, ', '.join(res))

        except CommandExecutionError:
            # No search results
            # Not provided by any installed package, add it to the list
            to_install.append(pkg)

    return to_install

def all_installed(name, refresh=False, **kwargs):
    '''
    Ensure that all the subscribed products are installed.

    refresh
        force a refresh if set to True.
        If set to False (default) it depends on zypper if a refresh is
        executed.
    '''

    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}

    to_install = _get_missing_products(refresh)

    if not to_install:
        # All product packages are already installed
        ret['comment'] = "All subscribed products are already installed"
        ret['result'] = True

        log.debug("All products are already installed. Nothing to do.")
        return ret

    return __states__['pkg.installed'](name, pkgs=to_install)
   070701000000AB000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001A00000000susemanager-sls/src/tests 070701000000AC000081B40000000000000000000000015EA152C4000000A8000000000000000000000000000000000000002400000000susemanager-sls/src/tests/README.md   ## Running tests

Run tests from _this_ directory. PyTest installed is required.
To run the tests, issue the following command:

  py.test <ENTER>

That's all for now.
070701000000AD000081B40000000000000000000000015EA152C400000000000000000000000000000000000000000000002600000000susemanager-sls/src/tests/__init__.py 070701000000AE000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001F00000000susemanager-sls/src/tests/data    070701000000AF000081B40000000000000000000000015EA152C4000000ED000000000000000000000000000000000000003600000000susemanager-sls/src/tests/data/cpuinfo.ppc64le.sample processor	: 0
cpu		: POWER8E (raw), altivec supported
clock		: 3425.000000MHz
revision	: 2.1 (pvr 004b 0201)

timebase	: 512000000
platform	: pSeries
model		: IBM pSeries (emulated by qemu)
machine		: CHRP IBM pSeries (emulated by qemu)
   070701000000B0000081B40000000000000000000000015EA152C400000303000000000000000000000000000000000000003300000000susemanager-sls/src/tests/data/cpuinfo.s390.sample    vendor_id       : IBM/S390
# processors    : 1
bogomips per cpu: 2913.00
features : esan3 zarch stfle msa ldisp eimm dfp etf3eh highgprs
cache0          : level=1 type=Data scope=Private size=96K line_size=256 associativity=6
cache1          : level=1 type=Instruction scope=Private size=64K line_size=256 associativity=4
cache2          : level=2 type=Data scope=Private size=1024K line_size=256 associativity=8
cache3          : level=2 type=Instruction scope=Private size=1024K line_size=256 associativity=8
cache4          : level=3 type=Unified scope=Shared size=49152K line_size=256 associativity=12
cache5          : level=4 type=Unified scope=Shared size=393216K line_size=256 associativity=24
processor 0: version = FF,  identification = 0F9A27,  machine = 2827
 070701000000B1000081B40000000000000000000000015EA152C4000010C4000000000000000000000000000000000000002E00000000susemanager-sls/src/tests/data/cpuinfo.sample processor	: 0
vendor_id	: GenuineIntel
cpu family	: 6
model		: 61
model name	: Intel(R) Core(TM) i7-5600U CPU @ 2.60GHz
stepping	: 4
microcode	: 0x22
cpu MHz		: 1314.117
cache size	: 4096 KB
physical id	: 0
siblings	: 4
core id		: 0
cpu cores	: 2
apicid		: 0
initial apicid	: 0
fpu		: yes
fpu_exception	: yes
cpuid level	: 20
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt dtherm ida arat pln pts
bugs		:
bogomips	: 5187.99
clflush size	: 64
cache_alignment	: 64
address sizes	: 39 bits physical, 48 bits virtual
power management:

processor	: 1
vendor_id	: GenuineIntel
cpu family	: 6
model		: 61
model name	: Intel(R) Core(TM) i7-5600U CPU @ 2.60GHz
stepping	: 4
microcode	: 0x22
cpu MHz		: 2100.109
cache size	: 4096 KB
physical id	: 0
siblings	: 4
core id		: 0
cpu cores	: 2
apicid		: 1
initial apicid	: 1
fpu		: yes
fpu_exception	: yes
cpuid level	: 20
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt dtherm ida arat pln pts
bugs		:
bogomips	: 5187.99
clflush size	: 64
cache_alignment	: 64
address sizes	: 39 bits physical, 48 bits virtual
power management:

processor	: 2
vendor_id	: GenuineIntel
cpu family	: 6
model		: 61
model name	: Intel(R) Core(TM) i7-5600U CPU @ 2.60GHz
stepping	: 4
microcode	: 0x22
cpu MHz		: 1718.742
cache size	: 4096 KB
physical id	: 0
siblings	: 4
core id		: 1
cpu cores	: 2
apicid		: 2
initial apicid	: 2
fpu		: yes
fpu_exception	: yes
cpuid level	: 20
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt dtherm ida arat pln pts
bugs		:
bogomips	: 5187.99
clflush size	: 64
cache_alignment	: 64
address sizes	: 39 bits physical, 48 bits virtual
power management:

processor	: 3
vendor_id	: GenuineIntel
cpu family	: 6
model		: 61
model name	: Intel(R) Core(TM) i7-5600U CPU @ 2.60GHz
stepping	: 4
microcode	: 0x22
cpu MHz		: 2108.335
cache size	: 4096 KB
physical id	: 0
siblings	: 4
core id		: 1
cpu cores	: 2
apicid		: 3
initial apicid	: 3
fpu		: yes
fpu_exception	: yes
cpuid level	: 20
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt dtherm ida arat pln pts
bugs		:
bogomips	: 5187.99
clflush size	: 64
cache_alignment	: 64
address sizes	: 39 bits physical, 48 bits virtual
power management:

070701000000B2000081B40000000000000000000000015EA152C4000006CF000000000000000000000000000000000000003000000000susemanager-sls/src/tests/data/dmidecode.sample   # dmidecode 3.0
Getting SMBIOS data from sysfs.
SMBIOS 2.7 present.

Handle 0x0004, DMI type 4, 42 bytes
Processor Information
	Socket Designation: U3E1
	Type: Central Processor
	Family: Core i7
	Manufacturer: Intel(R) Corporation
	ID: D4 06 03 00 FF FB EB BF
	Signature: Type 0, Family 6, Model 61, Stepping 4
	Flags:
		FPU (Floating-point unit on-chip)
		VME (Virtual mode extension)
		DE (Debugging extension)
		PSE (Page size extension)
		TSC (Time stamp counter)
		MSR (Model specific registers)
		PAE (Physical address extension)
		MCE (Machine check exception)
		CX8 (CMPXCHG8 instruction supported)
		APIC (On-chip APIC hardware supported)
		SEP (Fast system call)
		MTRR (Memory type range registers)
		PGE (Page global enable)
		MCA (Machine check architecture)
		CMOV (Conditional move instruction supported)
		PAT (Page attribute table)
		PSE-36 (36-bit page size extension)
		CLFSH (CLFLUSH instruction supported)
		DS (Debug store)
		ACPI (ACPI supported)
		MMX (MMX technology supported)
		FXSR (FXSAVE and FXSTOR instructions supported)
		SSE (Streaming SIMD extensions)
		SSE2 (Streaming SIMD extensions 2)
		SS (Self-snoop)
		HTT (Multi-threading)
		TM (Thermal monitor supported)
		PBE (Pending break enabled)
	Version: Intel(R) Core(TM) i7-5600U CPU @ 2.60GHz
	Voltage: 1.1 V
	External Clock: 100 MHz
	Max Speed: 3600 MHz
	Current Speed: 2600 MHz
	Status: Populated, Enabled
	Upgrade: Socket BGA1168
	L1 Cache Handle: 0x0005
	L2 Cache Handle: 0x0006
	L3 Cache Handle: 0x0007
	Serial Number: None
	Asset Tag: None
	Part Number: None
	Core Count: 2
	Core Enabled: 2
	Thread Count: 4
	Characteristics:
		64-bit capable
		Multi-Core
		Hardware Thread
		Execute Protection
		Enhanced Virtualization
		Power/Performance Control

 070701000000B3000081B40000000000000000000000015EA152C4000000D7000000000000000000000000000000000000003500000000susemanager-sls/src/tests/data/livepatching-1.sample  kgraft_patch_1_2_2
    active: 1
    RPM: kgraft-patch-3_12_62-60_64_8-default-1-2.2.x86_64
    CVE: (none - this is an initial kGraft patch)
    bug fixes and enhancements: (none)

kgraft_patch_2_2_1
    active: 0
 070701000000B4000081B40000000000000000000000015EA152C4000000CA000000000000000000000000000000000000003500000000susemanager-sls/src/tests/data/livepatching-2.sample  kgraft_patch_1_2_2
    active: 0

kgraft_patch_2_2_1
    active: 1
    RPM: kgraft-patch-3_12_62-60_64_8-default-2-2.1.x86_64
    CVE: CVE-2016-8666 CVE-2016-6480
    bug fixes and enhancements: (none)
  070701000000B5000081B40000000000000000000000015EA152C4000000C7000000000000000000000000000000000000003400000000susemanager-sls/src/tests/data/lscpu.ppc64le.sample   # The following is the parsable format, which can be fed to other
# programs. Each different item in every column has an unique ID
# starting from zero.
# CPU,Core,Socket,Node,,L1d,L1i
0,0,0,0,,0,0

 070701000000B6000081B40000000000000000000000015EA152C4000000D1000000000000000000000000000000000000003100000000susemanager-sls/src/tests/data/lscpu.s390.sample  # The following is the parsable format, which can be fed to other
# programs. Each different item in every column has an unique ID
# starting from zero.
# CPU,Core,Socket,Node,,L1d,L1i,L2d,L2i
0,0,0,,,0,0,0,0
   070701000000B7000081B40000000000000000000000015EA152C400000103000000000000000000000000000000000000002C00000000susemanager-sls/src/tests/data/lscpu.sample   # The following is the parsable format, which can be fed to other
# programs. Each different item in every column has an unique ID
# starting from zero.
# CPU,Core,Socket,Node,,L1d,L1i,L2,L3
0,0,0,0,,0,0,0,0
1,0,0,0,,0,0,0,0
2,1,0,0,,1,1,1,0
3,1,0,0,,1,1,1,0
 070701000000B8000081B40000000000000000000000015EA152C4000004D9000000000000000000000000000000000000002B00000000susemanager-sls/src/tests/data/udev.sample    P: /devices/LNXSYSTM:00/LNXPWRBN:00
E: DEVPATH=/devices/LNXSYSTM:00/LNXPWRBN:00
E: DRIVER=button
E: MODALIAS=acpi:LNXPWRBN:
E: SUBSYSTEM=acpi

P: /devices/LNXSYSTM:00/LNXPWRBN:00/input/input2
E: DEVPATH=/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2
E: EV=3
E: ID_FOR_SEAT=input-acpi-LNXPWRBN_00
E: ID_INPUT=1
E: ID_INPUT_KEY=1
E: ID_PATH=acpi-LNXPWRBN:00
E: ID_PATH_TAG=acpi-LNXPWRBN_00
E: KEY=10000000000000 0
E: MODALIAS=input:b0019v0000p0001e0000-e0,1,k74,ramlsfw
E: NAME="Power Button"
E: PHYS="LNXPWRBN/button/input0"
E: PRODUCT=19/0/1/0
E: PROP=0
E: SUBSYSTEM=input
E: TAGS=:seat:
E: USEC_INITIALIZED=2010022

P: /devices/LNXSYSTM:00/LNXPWRBN:00/input/input2/event2
N: input/event2
E: BACKSPACE=guess
E: DEVNAME=/dev/input/event2
E: DEVPATH=/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2/event2
E: ID_INPUT=1
E: ID_INPUT_KEY=1
E: ID_PATH=acpi-LNXPWRBN:00
E: ID_PATH_TAG=acpi-LNXPWRBN_00
E: MAJOR=13
E: MINOR=66
E: SUBSYSTEM=input
E: TAGS=:power-switch:
E: USEC_INITIALIZED=2076101
E: XKBLAYOUT=us
E: XKBMODEL=pc105

P: /devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0
E: DEVPATH=/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0
E: DEVTYPE=scsi_device
E: DRIVER=sd
E: MODALIAS=scsi:t-0x00
E: SUBSYSTEM=scsi
   070701000000B9000081B40000000000000000000000015EA152C4000000E7000000000000000000000000000000000000003900000000susemanager-sls/src/tests/data/virt_state-test.initcache  (dp1
S'domain_data'
p2
(dp3
I5
(dp4
S'name'
p5
S'testvm'
p6
sS'virt_type'
p7
S'para_virtualized'
p8
sS'state'
p9
S'running'
p10
sS'vcpus'
p11
I2
sS'memory_size'
p12
S'1024'
p13
sS'uuid'
p14
I5
sssS'expire_time'
p15
L2141506800L
s.
 070701000000BA000081B40000000000000000000000015EA152C40000033E000000000000000000000000000000000000002500000000susemanager-sls/src/tests/mockery.py  import sys
import os
try:
    from cStringIO import StringIO
except ImportError:
    from io import StringIO
from mock import MagicMock


def setup_environment():
    '''
    Mock the environment.
    :return:
    '''
    if 'salt' not in sys.modules or not isinstance(sys.modules['salt'], MagicMock):
        sys.modules['salt'] = MagicMock()
        sys.modules['salt.utils'] = MagicMock()
        sys.modules['salt.utils.versions'] = MagicMock()
        sys.modules['salt.modules'] = MagicMock()
        sys.modules['salt.modules.cmdmod'] = MagicMock()
        sys.modules['salt.exceptions'] = MagicMock(CommandExecutionError=Exception)


def get_test_data(filename):
    '''
    Get a test data.

    :param filename:
    :return:
    '''
    return open(os.path.sep.join([os.path.abspath(''), 'data', filename]), 'r').read()
  070701000000BB000081B40000000000000000000000015EA152C400000518000000000000000000000000000000000000003000000000susemanager-sls/src/tests/test_beacon_pkgset.py   '''
Author: Bo Maryniuk <bo@suse.de>
'''

from mock import MagicMock, patch
from ..beacons import pkgset

pkgset.__context__ = dict()


def test_virtual():
    '''
    Test virtual function.
    '''
    with patch.object(pkgset.os.path, "exists", MagicMock(return_value=True)):
        assert pkgset.__virtual__() == pkgset.__virtualname__
    with patch.object(pkgset.os.path, "exists", MagicMock(return_value=False)):
        assert pkgset.__virtual__() != pkgset.__virtualname__


def test_validate():
    '''
    Test validate() function
    '''
    res, msg = pkgset.validate({'cookie': '/bogus/path'})
    assert res is True
    assert msg == 'Configuration validated'

    for cfg in [{}, {'bogus': 'data'}]:
        res, msg = pkgset.validate(cfg)
        assert res is False
        assert msg == 'Cookie path has not been set.'


@patch.object(pkgset.os.path, 'exists', MagicMock(return_value=True))
@patch.object(pkgset, '__context__', {pkgset.__virtualname__: ""})
def test_beacon():
    '''
    Test beacon functionality.
    '''
    mock_content = MagicMock(
        **{'return_value.__enter__.return_value.read.return_value.strip.return_value': 'test'}
    )
    with patch.object(pkgset, 'open', mock_content):
        data = pkgset.beacon({})
        assert data == [{'tag': 'changed'}]
070701000000BC000081B40000000000000000000000015EA152C400001837000000000000000000000000000000000000003400000000susemanager-sls/src/tests/test_beacon_virtpoller.py   '''
Author: Michael Calmer <mc@suse.com>
'''
import sys
import os
import shutil
from mock import MagicMock, patch
sys.modules['libvirt'] = MagicMock()
from ..beacons import virtpoller

virtpoller.__context__ = dict()

CACHE_FILE = '/tmp/virt_state-test.cache'

def test_virtual():
    '''
    Test virtual function.
    '''
    #with patch(virtpoller.HAS_LIBVIRT, True):
    assert virtpoller.__virtual__() == virtpoller.__virtualname__


def test_validate():
    '''
    Test validate() function
    '''
    res, msg = virtpoller.validate({'cache_file': '/bogus/path',
                                    'expire_time': 2})
    assert res is True
    assert msg == 'Configuration validated'


def test_beacon():
    '''
    Test beacon functionality.
    First run without cache file. All systems are "new" and should be reported.
    '''
    domain = MagicMock()
    domain.info = MagicMock(name='info')
    domain.info.return_value = [0, 1024, 1024, 2, 30]
    domain.name = MagicMock(name='name')
    domain.name.return_value = 'testvm'

    conn = MagicMock()
    conn.listAllDomains = MagicMock(name='listAllDomains')
    conn.listAllDomains.return_value = [domain]

    if os.path.exists(CACHE_FILE):
        os.unlink(CACHE_FILE)

    with patch.object(virtpoller, 'libvirt', MagicMock(return_value=True)):
        with patch.object(virtpoller.libvirt, 'openReadOnly', MagicMock(return_value=conn)):
            with patch.object(virtpoller.binascii, 'hexlify', MagicMock(return_value=5)):
                ret = virtpoller.beacon({'cache_file': CACHE_FILE,
                                         'expire_time': 2})
    assert isinstance(ret, list)
    assert isinstance(ret[0], dict)
    assert sorted(ret[0].keys()) == ['plan']
    assert ret[0]['plan'][0]['event_type'] == 'exists'
    assert 'guest_properties' in ret[0]['plan'][0]
    data = ret[0]['plan'][0]['guest_properties']
    assert data['name'] == 'testvm'
    assert data['virt_type'] == 'para_virtualized'
    assert data['state'] == 'running'
    assert data['vcpus'] == 2
    assert data['memory_size'] == '1024'
    assert data['uuid'] == 5

def test_beacon_update():
    '''
    Test beacon functionality. Second run with cache file.
    Nothing has changed so the return value of the function
    Should be an empty list
    '''
    domain = MagicMock()
    domain.info = MagicMock(name='info')
    domain.info.return_value = [0, 1024, 1024, 2, 30]
    domain.name = MagicMock(name='name')
    domain.name.return_value = 'testvm'

    conn = MagicMock()
    conn.listAllDomains = MagicMock(name='listAllDomains')
    conn.listAllDomains.return_value = [domain]

    if os.path.exists(CACHE_FILE):
        os.unlink(CACHE_FILE)
    shutil.copyfile(os.path.sep.join([os.path.abspath(''), 'data', 'virt_state-test.initcache']), CACHE_FILE)

    with patch.object(virtpoller, 'libvirt', MagicMock(return_value=True)):
        with patch.object(virtpoller.libvirt, 'openReadOnly', MagicMock(return_value=conn)):
            with patch.object(virtpoller.binascii, 'hexlify', MagicMock(return_value=5)):
                ret = virtpoller.beacon({'cache_file': CACHE_FILE,
                                         'expire_time': 2})
    assert isinstance(ret, list)
    print("%s" % ret)
    assert len(ret) == 0

def test_beacon_change():
    '''
    Test beacon functionality. Another run with cache file.
    There are changes so it should report the new values.
    '''
    domain = MagicMock()
    domain.info = MagicMock(name='info')
    domain.info.return_value = [4, 1024, 2048, 2, 30]
    domain.name = MagicMock(name='name')
    domain.name.return_value = 'testvm'

    conn = MagicMock()
    conn.listAllDomains = MagicMock(name='listAllDomains')
    conn.listAllDomains.return_value = [domain]

    if os.path.exists(CACHE_FILE):
        os.unlink(CACHE_FILE)
    shutil.copyfile(os.path.sep.join([os.path.abspath(''), 'data', 'virt_state-test.initcache']), CACHE_FILE)

    with patch.object(virtpoller, 'libvirt', MagicMock(return_value=True)):
        with patch.object(virtpoller.libvirt, 'openReadOnly', MagicMock(return_value=conn)):
            with patch.object(virtpoller.binascii, 'hexlify', MagicMock(return_value=5)):
                ret = virtpoller.beacon({'cache_file': CACHE_FILE,
                                         'expire_time': 2})
    assert isinstance(ret, list)
    assert isinstance(ret[0], dict)
    assert sorted(ret[0].keys()) == ['plan']
    assert ret[0]['plan'][0]['event_type'] == 'exists'
    assert 'guest_properties' in ret[0]['plan'][0]
    data = ret[0]['plan'][0]['guest_properties']
    assert data['name'] == 'testvm'
    assert data['virt_type'] == 'para_virtualized'
    assert data['state'] == 'stopped'
    assert data['vcpus'] == 2
    assert data['memory_size'] == '2048'
    assert data['uuid'] == 5

def test_beacon_remove():
    '''
    Test beacon functionality. Another run with cache file.
    The former host is not available anymore. Report should
    say "removed"
    '''

    conn = MagicMock()
    conn.listAllDomains = MagicMock(name='listAllDomains')
    conn.listAllDomains.return_value = []

    if os.path.exists(CACHE_FILE):
        os.unlink(CACHE_FILE)
    shutil.copyfile(os.path.sep.join([os.path.abspath(''), 'data', 'virt_state-test.initcache']), CACHE_FILE)

    with patch.object(virtpoller, 'libvirt', MagicMock(return_value=True)):
        with patch.object(virtpoller.libvirt, 'openReadOnly', MagicMock(return_value=conn)):
            with patch.object(virtpoller.binascii, 'hexlify', MagicMock(return_value=5)):
                ret = virtpoller.beacon({'cache_file': CACHE_FILE,
                                         'expire_time': 2})
    assert isinstance(ret, list)
    assert isinstance(ret[0], dict)
    assert sorted(ret[0].keys()) == ['plan']
    assert ret[0]['plan'][0]['event_type'] == 'removed'
    assert 'guest_properties' in ret[0]['plan'][0]
    data = ret[0]['plan'][0]['guest_properties']
    assert data['name'] == 'testvm'
    assert data['virt_type'] == 'para_virtualized'
    assert data['state'] == 'running'
    assert data['vcpus'] == 2
    assert data['memory_size'] == '1024'
    assert data['uuid'] == 5

 070701000000BD000081B40000000000000000000000015EA152C400000BA4000000000000000000000000000000000000003100000000susemanager-sls/src/tests/test_grains_cpuinfo.py  '''
Author: bo@suse.de
'''

from mock import MagicMock, patch, mock_open
from . import mockery
mockery.setup_environment()

from ..grains import cpuinfo


def test_total_num_cpus():
    '''
    Test total_num_cpus function.

    :return:
    '''
    os_listdir = ['cpu0', 'cpu1', 'cpu2', 'cpu3', 'cpufreq', 'cpuidle', 'power', 'modalias',
                  'kernel_max', 'possible', 'online', 'offline', 'isolated', 'uevent',
                  'intel_pstate', 'microcode', 'present']

    with patch('os.path.exists', MagicMock(return_value=True)):
        with patch('os.listdir', MagicMock(return_value=os_listdir)):
            cpus = cpuinfo.total_num_cpus()
            assert type(cpus) == dict
            assert 'total_num_cpus' in cpus
            assert cpus['total_num_cpus'] == 4


def test_cpusockets_dmidecode():
    '''
    Test dmidecode sub in cpusockets function.

    :return:
    '''

    sample = mockery.get_test_data('dmidecode.sample')
    cpuinfo.log = MagicMock()
    with patch('src.modules.udevdb._which_bin', MagicMock(return_value="/bogus/path")):
        with patch.dict(cpuinfo.__salt__, {'cmd.run_all': MagicMock(return_value={'retcode': 0, 'stdout': sample})}):
            out = cpuinfo._dmidecode([])
            assert type(out) == dict
            assert 'cpusockets' in out
            assert out['cpusockets'] == 1


def test_cpusockets_parse_cpuinfo():
    '''
    Test parse_cpuinfo sub in cpusockets function.

    :return:
    '''
    cpuinfo.log = MagicMock()
    # cpuinfo parser is not applicable for non-Intel architectures, so should return nothing.
    for sample_name in ['cpuinfo.s390.sample', 'cpuinfo.ppc64le.sample']:
        with patch('os.access', MagicMock(return_value=True)):
            with patch.object(cpuinfo, 'open', mock_open(read_data=mockery.get_test_data(sample_name)), create=True):
                assert cpuinfo._parse_cpuinfo([]) is None

    with patch('os.access', MagicMock(return_value=True)):
        with patch.object(cpuinfo, 'open', mock_open(read_data=mockery.get_test_data('cpuinfo.sample')), create=True):
            out = cpuinfo._parse_cpuinfo([])
            assert type(out) == dict
            assert 'cpusockets' in out
            assert out['cpusockets'] == 1


def test_cpusockets_lscpu():
    '''
    Test lscpu sub in cpusockets function.

    :return:
    '''
    for fn_smpl in ['lscpu.ppc64le.sample', 'lscpu.s390.sample', 'lscpu.sample']:
        cpuinfo.log = MagicMock()
        with patch('src.modules.udevdb._which_bin', MagicMock(return_value="/bogus/path")):
            with patch.dict(cpuinfo.__salt__,
                            {'cmd.run_all': MagicMock(return_value={'retcode': 0,
                                                                    'stdout': mockery.get_test_data(fn_smpl)})}):
                out = cpuinfo._lscpu([])
                assert type(out) == dict
                assert 'cpusockets' in out
                assert out['cpusockets'] == 1

070701000000BE000081B40000000000000000000000015EA152C4000005E3000000000000000000000000000000000000003200000000susemanager-sls/src/tests/test_mgr_master_tops.py # -*- coding: utf-8 -*-
'''
:codeauthor:    Pablo Suárez Hernández <psuarezhernandez@suse.de>
'''

from mock import MagicMock, patch
from . import mockery
mockery.setup_environment()

import sys
sys.path.append("../../modules/tops")

import mgr_master_tops

TEST_MANAGER_STATIC_TOP = {
    "base": [
        "channels",
        "certs",
        "packages",
        "custom",
        "custom_groups",
        "custom_org",
        "formulas",
        "services.salt-minion",
        "services.docker",
        "services.kiwi-image-server"
    ]
}


def test_virtual():
    '''
    Test virtual returns the module name
    '''
    assert mgr_master_tops.__virtual__() == "mgr_master_tops"


def test_top_default_saltenv():
    '''
    Test if top function is returning the static SUSE Manager top state
    for base environment when no environment has been specified.
    '''
    kwargs = {'opts': {'environment': None}}
    assert mgr_master_tops.top(**kwargs) == TEST_MANAGER_STATIC_TOP


def test_top_base_saltenv():
    '''
    Test if top function is returning the static SUSE Manager top state
    for base environment when environment is set to "base".
    '''
    kwargs = {'opts': {'environment': 'base'}}
    assert mgr_master_tops.top(**kwargs) == TEST_MANAGER_STATIC_TOP


def test_top_unknown_saltenv():
    '''
    Test if top function is returning None for unknown salt environments.
    '''
    kwargs = {'opts': {'environment': 'otherenv'}}
    assert mgr_master_tops.top(**kwargs) == None
 070701000000BF000081B40000000000000000000000015EA152C4000004A1000000000000000000000000000000000000003A00000000susemanager-sls/src/tests/test_module_mainframesysinfo.py '''
Author: Bo Maryniuk <bo@suse.de>
'''

import pytest
from mock import MagicMock, patch
from . import mockery
mockery.setup_environment()

from ..modules import mainframesysinfo


def test_virtual():
    '''
    Test virtual returns True if setup os.access returns positive, and otherwise.

    :return:
    '''

    with patch('os.access', MagicMock(return_value=True)):
        assert mainframesysinfo.__virtual__() is True

    with patch('os.access', MagicMock(return_value=False)):
        assert mainframesysinfo.__virtual__() is False


def test_read_values():
    '''
    Test the read_values method.

    :return:
    '''
    bogus_data = "bogus data"
    run_all = {'stdout': bogus_data, 'retcode': 0, 'stderr': ''}
    with patch.dict(mainframesysinfo.__salt__, {'cmd.run_all': MagicMock(return_value=run_all)}):
        assert mainframesysinfo.read_values() == bogus_data

    run_all['retcode'] = 1
    run_all['stderr'] = 'error here'
    with patch.dict(mainframesysinfo.__salt__, {'cmd.run_all': MagicMock(return_value=run_all)}):
        with pytest.raises(Exception) as x:
            mainframesysinfo.read_values()
        assert str(x.value) == run_all['stderr']
   070701000000C0000081B40000000000000000000000015EA152C400000608000000000000000000000000000000000000003200000000susemanager-sls/src/tests/test_module_sumautil.py '''
Author: mc@suse.com
'''

from mock import MagicMock, patch
from . import mockery
mockery.setup_environment()

from ..modules import sumautil


def test_livepatching_kernelliveversion():
    '''
    Test kernel_live_version.

    :return:
    '''

    sumautil.log = MagicMock()
    with patch('src.modules.udevdb._which_bin', MagicMock(return_value="/bogus/path")):
        mock = MagicMock(side_effect=[{ 'retcode': 0, 'stdout': 'ready' },
                                    { 'retcode': 0, 'stdout': mockery.get_test_data('livepatching-1.sample')}
                                    ]);
        with patch.dict(sumautil.__salt__, {'cmd.run_all': mock}):
            out = sumautil.get_kernel_live_version()
            assert type(out) == dict
            assert 'mgr_kernel_live_version' in out
            assert out['mgr_kernel_live_version'] == 'kgraft_patch_1_2_2'

        mock = MagicMock(side_effect=[{ 'retcode': 0, 'stdout': 'ready' },
                                    { 'retcode': 0, 'stdout': mockery.get_test_data('livepatching-2.sample') }
                                    ]);
        with patch.dict(sumautil.__salt__, {'cmd.run_all': mock}):
            out = sumautil.get_kernel_live_version()
            assert type(out) == dict
            assert 'mgr_kernel_live_version' in out
            assert out['mgr_kernel_live_version'] == 'kgraft_patch_2_2_1'

    with patch('src.modules.udevdb._which_bin', MagicMock(return_value=None)):
        out = sumautil.get_kernel_live_version()
        assert out is None
070701000000C1000081B40000000000000000000000015EA152C400000F03000000000000000000000000000000000000003000000000susemanager-sls/src/tests/test_module_udevdb.py   '''
Author: Bo Maryniuk <bo@suse.de>
'''

from mock import MagicMock, patch
from . import mockery
mockery.setup_environment()

from ..modules import udevdb


def test_virtual():
    '''
    Test virtual returns True if 'udevadm' is around in the environment.

    :return:
    '''
    with patch('src.modules.udevdb._which_bin', MagicMock(return_value=None)):
        assert udevdb.__virtual__() is False

    with patch('src.modules.udevdb._which_bin', MagicMock(return_value="/bogus/path")):
        assert udevdb.__virtual__() is True


def test_normalize():
    '''
    Test if udevdb.normalize does not returns nested lists that contains only one item.

    :return:
    '''
    data = {'key': ['value', 'here'], 'foo': ['bar'], 'some': 'data'}
    assert udevdb.normalize(data) == {'foo': 'bar', 'some': 'data', 'key': ['value', 'here']}


def test_exportdb():
    '''
    Test udevdb.exportdb method.

    Feeds a recorded udev database dump ('udev.sample') into exportdb via a
    mocked 'cmd.run_all' and checks each parsed device section against the
    expected structure below.

    :return:
    '''
    udev_data = mockery.get_test_data('udev.sample')
    # Expected parse result: one dict per device section with keys
    #   'P'     - device path
    #   'E'     - exported properties (note: some values are expected as
    #             ints, e.g. ID_INPUT, MAJOR)
    #   'N'     - device node name (only present for some devices)
    #   'X-Mgr' - extra SUSE Manager data, e.g. SCSI type (only for SCSI)
    out = [{'P': '/devices/LNXSYSTM:00/LNXPWRBN:00',
            'E': {'MODALIAS': 'acpi:LNXPWRBN:',
                  'SUBSYSTEM': 'acpi',
                  'DRIVER': 'button',
                  'DEVPATH': '/devices/LNXSYSTM:00/LNXPWRBN:00'}},
           {'P': '/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2',
            'E': {'SUBSYSTEM': 'input',
                  'PRODUCT': '19/0/1/0',
                  'PHYS': '"LNXPWRBN/button/input0"',
                  'NAME': '"Power Button"',
                  'ID_INPUT': 1,
                  'DEVPATH': '/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2',
                  'MODALIAS': 'input:b0019v0000p0001e0000-e0,1,k74,ramlsfw',
                  'ID_PATH_TAG': 'acpi-LNXPWRBN_00',
                  'TAGS': ':seat:',
                  'PROP': 0,
                  'ID_FOR_SEAT': 'input-acpi-LNXPWRBN_00',
                  'KEY': '10000000000000 0',
                  'USEC_INITIALIZED': 2010022,
                  'ID_PATH': 'acpi-LNXPWRBN:00',
                  'EV': 3,
                  'ID_INPUT_KEY': 1}},
           {'P': '/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2/event2',
            'E': {'SUBSYSTEM': 'input',
                  'XKBLAYOUT': 'us',
                  'MAJOR': 13,
                  'ID_INPUT': 1,
                  'DEVPATH': '/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2/event2',
                  'ID_PATH_TAG': 'acpi-LNXPWRBN_00',
                  'DEVNAME': '/dev/input/event2',
                  'TAGS': ':power-switch:',
                  'BACKSPACE': 'guess',
                  'MINOR': 66,
                  'USEC_INITIALIZED': 2076101,
                  'ID_PATH': 'acpi-LNXPWRBN:00',
                  'XKBMODEL': 'pc105',
                  'ID_INPUT_KEY': 1},
            'N': 'input/event2'},
           {'P': '/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0',
            'E': {'MODALIAS': 'scsi:t-0x00',
                  'SUBSYSTEM': 'scsi',
                  'DEVTYPE': 'scsi_device',
                  'DRIVER': 'sd',
                  'DEVPATH': '/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0'
                  },
            'X-Mgr': {'SCSI_SYS_TYPE': '0'}},
           ]

    # Two consecutive 'cmd.run_all' calls are mocked: the first returns the
    # udev database dump, the second the SCSI type lookup ('0').
    with patch.dict(udevdb.__salt__, {'cmd.run_all': MagicMock(side_effect=[{'retcode': 0, 'stdout': udev_data},
                                                                            {'retcode': 0, 'stdout': '0'}])}):
        data = udevdb.exportdb()
        # No empty/falsy sections may appear in the parsed output.
        assert data == [_f for _f in data if _f]

        # Compare every parsed section field-by-field with the expectation.
        for d_idx, d_section in enumerate(data):
            assert out[d_idx]['P'] == d_section['P']
            assert out[d_idx].get('N') == d_section.get('N')
            assert out[d_idx].get('X-Mgr') == d_section.get('X-Mgr')
            for key, value in list(d_section['E'].items()):
                assert out[d_idx]['E'][key] == value
 070701000000C2000081B40000000000000000000000015EA152C40000105C000000000000000000000000000000000000003000000000susemanager-sls/src/tests/test_state_product.py   '''
Author: cbbayburt@suse.com
'''

import sys
from mock import MagicMock, patch, call
from . import mockery
mockery.setup_environment()

from ..states import product

# Mock globals
# These dunder attributes are normally injected into the 'product' state
# module by the Salt loader; stub them so the module can be exercised
# stand-alone in these tests (patch.dict below fills them per test).
product.log = MagicMock()
product.__salt__ = {}
product.__grains__ = {}

@patch.dict(product.__grains__, {'os_family': 'Suse'})
def test_suse_with_zypper():
    '''
    Test if the state module is available for SUSE OS only with a
    supported version of zypper (>= 1.8.13) available.
    '''
    # Supported zypper version: module loads under its virtual name.
    with patch.dict(product.__salt__, {'pkg.info_installed': MagicMock(return_value={'zypper': {'version': '1.9.0'}})}):
        with patch.object(product, 'version_cmp', MagicMock(return_value=1)):
            # Fixed: compare with '==' rather than 'is'. Identity comparison
            # against a string literal only worked via CPython interning and
            # emits a SyntaxWarning on Python >= 3.8.
            assert product.__virtual__() == 'product'
            product.version_cmp.assert_called_once_with('1.9.0', '1.8.13')

    # Unsupported zypper version: module refuses to load with a reason.
    with patch.dict(product.__salt__, {'pkg.info_installed': MagicMock(return_value={'zypper': {'version': '1.8.0'}})}):
        with patch.object(product, 'version_cmp', MagicMock(return_value=-1)):
            assert product.__virtual__() == (False, "Module product: zypper 1.8.13 or greater required")
            product.version_cmp.assert_called_once_with('1.8.0', '1.8.13')

    # No zypper available at all: module refuses to load.
    with patch.dict(product.__salt__, {'pkg.info_installed': MagicMock(return_value=sys.modules['salt.exceptions'].CommandExecutionError)}):
        assert product.__virtual__() == (False, "Module product: zypper package manager not found")


@patch.dict(product.__grains__, {'os_family': 'Non-Suse'})
def test_non_suse():
    '''
    The state module must refuse to load on a non-SUSE OS family.
    '''
    expected = (False, "Module product: non SUSE OS not supported")
    assert product.__virtual__() == expected


def test_get_missing_products():
    '''
    Test if the missing products are returned correctly, excluding
    the ones that are provided by another installed product.
    '''
    search_results = {
        'not_installed': {'product1': True, 'product2': True},
        'provides-product1': {'this-provides-product1': True}
    }

    # pkg.search answers in call order: the not-installed product query,
    # the provider lookup for product1, then an error for product2.
    search_mock = MagicMock(side_effect=[
        search_results['not_installed'],
        search_results['provides-product1'],
        sys.modules['salt.exceptions'].CommandExecutionError])

    with patch.dict(product.__salt__, {'pkg.search': search_mock}):
        missing = product._get_missing_products(False)

        # Expected pkg.search calls
        expected_calls = [
            call('product()', refresh=False, match='exact', provides=True, not_installed_only=True),
            call('product1', match='exact', provides=True),
            call('product2', match='exact', provides=True)
        ]

        search_mock.assert_has_calls(expected_calls)
        assert search_mock.call_count == 3
        # Only the product nothing else provides may be reported missing
        assert missing == ['product2']


def test_not_installed_provides():
    '''
    Test if the provided packages are correctly excluded when
    provided by another missing product.
    '''
    search_results = {
        'not_installed': {'product1': True, 'this-provides-product1': True},
        'provides-product1': {'this-provides-product1': True}
    }

    # pkg.search answers in call order: the not-installed product query,
    # the provider lookup for product1, then an error for the provider.
    search_mock = MagicMock(side_effect=[
        search_results['not_installed'],
        search_results['provides-product1'],
        sys.modules['salt.exceptions'].CommandExecutionError])

    with patch.dict(product.__salt__, {'pkg.search': search_mock}):
        missing = product._get_missing_products(False)

        # Expected pkg.search calls
        expected_calls = [
            call('product()', refresh=False, match='exact', provides=True, not_installed_only=True),
            call('product1', match='exact', provides=True),
            call('this-provides-product1', match='exact', provides=True)
        ]

        search_mock.assert_has_calls(expected_calls)
        assert search_mock.call_count == 3
        # Only one of the two candidates may remain: the provider itself,
        # never the product it provides.
        assert len(missing) == 1
        assert 'product1' not in missing
        assert 'this-provides-product1' in missing
070701000000C3000081B40000000000000000000000015EA152C4000061E8000000000000000000000000000000000000002800000000susemanager-sls/susemanager-sls.changes   -------------------------------------------------------------------
Thu Apr 23 10:33:05 CEST 2020 - jgonzalez@suse.com

- version 4.0.25-1
- Fix virt.deleted state dependency
- remove key grains only when file and grain exists (bsc#1167237)
- cleanup key grains after usage
- Use saltutil states if available on the minion (bsc#1167556)
- Make 'product' state module only available for minions with zypper >= 1.8.13 (bsc#1166699)
- Adapt 'mgractionchains' module to work with Salt 3000
- Disable modularity failsafe mechanism for RHEL 8 repos (bsc#1164875)

-------------------------------------------------------------------
Fri Feb 28 12:08:31 CET 2020 - jgonzalez@suse.com

- version 4.0.24-1
- install dmidecode before HW profile update when missing
- Add mgr_start_event_grains.sls to update minion config
- Add 'product' custom state module to handle installation of
  SUSE products at client side (bsc#1157447)
- Support reading of pillar data for minions from multiple files (bsc#1158754)
- Do not workaround util.syncmodules for SSH minions (bsc#1162609)
- Force to run util.synccustomall when triggering action chains on SSH minions (bsc#1162683).
- Add custom 'is_payg_instance' grain when instance is PAYG and not BYOS.
- Adapt sls file for pre-downloading in Ubuntu minions
- sort formulas by execution order (bsc#1083326)
- split remove_traditional_stack into two parts. One for all systems and
  another for clients not being a Uyuni Server or Proxy (bsc#1121640)
- Change the order to check the version correctly for RES (bsc#1152795)
- do not break Servers registering to a Server
- Remove the virt-poller cache when applying Virtualization entitlement
- Force HTTP request timeout on public cloud grain (bsc#1157975)

-------------------------------------------------------------------
Mon Nov 25 10:22:32 CET 2019 - jgonzalez@suse.com

- version 4.0.23-1
- Support license entry in kiwi image packages list
- Install yum plugin only for yum < 4 (bsc#1156173)
- Add self monitoring to Admin Monitoring UI (bsc#1143638)
- configure GPG keys and SSL Certificates for RHEL8 and ES8
- Always run Kiwi with empty cache (bsc#1155899)
- Avoid traceback error due lazy loading which_bin (bsc#1155794)
- Create Kiwi cache dir if not present
- Consider timeout value in salt remote script (bsc#1153181)
- Using new module path for which_bin to get rid of DeprecationWarning
- Fix: match `image_id` with newer k8s (bsc#1149741)
- enable Kiwi NG on SLE15

-------------------------------------------------------------------
Mon Nov 04 15:45:51 CET 2019 - jgonzalez@suse.com

- version 4.0.22-1
- Do not show errors when polling internal metadata API (bsc#1155794)

-------------------------------------------------------------------
Mon Nov 04 11:56:38 CET 2019 - jgonzalez@suse.com

- version 4.0.21-1
- Add missing "public_cloud" custom grain (bsc#1155656)

-------------------------------------------------------------------
Thu Oct 17 12:35:00 CEST 2019 - jgonzalez@suse.com

- version 4.0.20-1
- Require pmtools only for SLE11 i586 and x86_64 (bsc#1150314)
- Introduce dnf-susemanager-plugin for RHEL8 minions
- Provide custom grain to report "instance id" when running on Public Cloud instances
- disable legacy startup events for new minions
- implement provisioning for salt clients
- dmidecode does not exist on ppc64le and s390x (bsc#1145119)
- update susemanager.conf to use adler32 for computing the server_id for new minions

-------------------------------------------------------------------
Thu Aug 29 14:45:38 CEST 2019 - jgonzalez@suse.com

- version 4.0.19-1
- Bootstrapping RES6/RHEL6/SLE11 with TLS1.2 now shows error message. (bsc#1147126)

-------------------------------------------------------------------
Wed Aug 28 17:53:06 CEST 2019 - jgonzalez@suse.com

- version 4.0.18-1
- Fix for issue with bootstrapping RES minions (bsc#1147126)

-------------------------------------------------------------------
Tue Jul 30 15:01:47 CEST 2019 - jgonzalez@suse.com

- version 4.0.17-1
- Force VM off before deleting it (bsc#1138127)
- Check for result of image rsync transfer to catch failures early (bsc#1104949)
- Allow forcing off or resetting VMs
- Make sure dmidecode is installed during bootstrap to ensure that hardware
  refresh works for all operating systems (bsc#1137952)

-------------------------------------------------------------------
Thu Jun 13 10:43:11 CEST 2019 - jgonzalez@suse.com

- version 4.0.16-1
- Prevent stuck Actions when onboarding KVM host minions (bsc#1137888)

-------------------------------------------------------------------
Tue Jun 11 12:18:32 CEST 2019 - jgonzalez@suse.com

- version 4.0.15-1
- Fix formula name encoding on Python 3 (bsc#1137533)

-------------------------------------------------------------------
Fri May 31 13:20:55 CEST 2019 - jgonzalez@suse.com

- version 4.0.14-1
- Fix the indentation so that custom formulas can be read correctly (bsc#1136937)

-------------------------------------------------------------------
Thu May 30 10:58:05 CEST 2019 - jgonzalez@suse.com

- version 4.0.13-1
- Adapt tests for SUSE manager 4.0
- More thoroughly disable the Salt mine in util.mgr_mine_config_clean_up (bsc#1135075)

-------------------------------------------------------------------
Wed May 15 15:35:23 CEST 2019 - jgonzalez@suse.com

- version 4.0.12-1
- SPEC cleanup
- Enabling certificate deployment for Leap 15.1 clients which is
  needed for bootstrapping
- States to enable/disable server monitoring
- Improve salt events processing performance (bsc#1125097)

-------------------------------------------------------------------
Mon Apr 22 12:23:43 CEST 2019 - jgonzalez@suse.com

- version 4.0.11-1
- Enable SLES11 OS Image Build Host
- Add support for Salt batch execution mode
- Do not configure Salt Mine in newly registered minions (bsc#1122837)
- use default 'master' branch in OSImage profile URL (bsc#1108218)
- Add Python linting makefile and PyLint configuration file

-------------------------------------------------------------------
Thu Apr 04 14:43:04 CEST 2019 - jgonzalez@suse.com

- version 4.0.10-1
- Update get_kernel_live_version module to support older Salt versions (bsc#1131490)

-------------------------------------------------------------------
Fri Mar 29 10:37:42 CET 2019 - jgonzalez@suse.com

- version 4.0.9-1
- Update get_kernel_live_version module to support SLES 15 live patches
- Support register minion using bootstrap repos for 18.04 and 16.04.

-------------------------------------------------------------------
Mon Mar 25 17:04:34 CET 2019 - jgonzalez@suse.com

- version 4.0.8-1
- Fix Salt error related to remove_traditional_stack when bootstrapping an Ubuntu
  minion (bsc#1128724)
- Adapt disablelocalrepos.sls syntax for Salt 2016.10 (rhel6, sle11) (bsc#1127706)
- Automatically trust SUSE GPG key for client tools channels on Ubuntu systems
- util.systeminfo sls has been added to perform different actions at minion startup(bsc#1122381)

-------------------------------------------------------------------
Sat Mar 02 00:16:05 CET 2019 - jgonzalez@suse.com

- version 4.0.7-1
- Add support for Ubuntu minions
- Add Ubuntu SSL-Cert SLS-Files

-------------------------------------------------------------------
Wed Feb 27 13:17:30 CET 2019 - jgonzalez@suse.com

- version 4.0.6-1
- Fix mgr_events to use current ioloop (bsc#1126280)
- add states for virtual machine actions
- Added option to read 'pkg_download_point_...' pillar values and use it in repo url

-------------------------------------------------------------------
Thu Jan 31 09:45:42 CET 2019 - jgonzalez@suse.com

- version 4.0.5-1
- prevent the pkgset beacon from firing during onboarding (bsc#1122896)
- Prevent excessive DEBUG logging from mgr_events engine

-------------------------------------------------------------------
Wed Jan 16 12:27:07 CET 2019 - jgonzalez@suse.com

- version 4.0.4-1
- Allow bootstrapping minions with a pending minion key being present (bsc#1119727)

-------------------------------------------------------------------
Mon Dec 17 14:46:00 CET 2018 - jgonzalez@suse.com

- version 4.0.3-1
- enhance bootstrap-repo urls for Centos and Opensuse
- use a Salt engine to process return results (bsc#1099988)

-------------------------------------------------------------------
Fri Oct 26 10:52:53 CEST 2018 - jgonzalez@suse.com

- version 4.0.2-1
- deploy SSL certificate during onboarding of openSUSE Leap 15.0 (bsc#1112163)
- install all available known kiwi boot descriptions
- Fix: Cleanup Kiwi cache in highstate (bsc#1109892)
- removed the ssl certificate verification while checking bootstrap repo URL (bsc#1095220)
- Removed the need for curl to be present at bootstrap phase (bsc#1095220)
- Migrate Python code to be Python 2/3 compatible
- Fix merging of image pillars
- Fix: delete old custom OS images pillar before generation (bsc#1105107)
- Generate OS image pillars via Java
- Store activation key in the Kiwi built image
- Implement the 2-phase registration of saltbooted minions (SUMA for Retail)

-------------------------------------------------------------------
Fri Aug 10 15:45:45 CEST 2018 - jgonzalez@suse.com

- version 4.0.1-1
- Bump version to 4.0.0 (bsc#1104034)
- Fix copyright for the package specfile (bsc#1103696)
- Feat: add OS Image building with Kiwi FATE#322959 FATE#323057 FATE#323056
- Use custom Salt capabilities to prevent breaking backward compatibility (bsc#1096514)
- Update profileupdate.sls to report all versions installed (bsc#1089526)
- Do not install 'python-salt' on container build hosts with older Salt versions
  (bsc#1097699)
- Fix bootstrap error when removing traditional stack (bsc#1096009)

-------------------------------------------------------------------
Wed May 23 09:03:37 CEST 2018 - jgonzalez@suse.com

- version 3.2.13-1
- Changes to mgractionchains module in order to support action chains on
  minions using ssh-push connection method.
- Fix migration from traditional stack to salt registration (bsc#1093825)

-------------------------------------------------------------------
Wed May 16 17:38:30 CEST 2018 - jgonzalez@suse.com

- version 3.2.12-1
- Fix external pillar formula "ifempty" and "namespace" handling
- Fix profileupdate sls to execute retrieval of kernel live patching info (bsc#1091052)
- Use recursive merge on form pillars
- install python2/3 salt flavours on buildhosts to generate a compatible
  thin for the docker image being built (bsc#1092161)
- docker.login requires a list as input (bsc#1092161)

-------------------------------------------------------------------
Mon May 07 15:31:50 CEST 2018 - jgonzalez@suse.com

- version 3.2.11-1
- fix hardware refresh when FQDN changes (bsc#1073267)
- Handle empty values. Do not pass optional fields to pillar in
  formulas if field is empty and no ifempty attr defined.
- Fixed processing of formulas with $scope: group
- Preserve order of formulas (bsc#1083326)

-------------------------------------------------------------------
Wed Apr 25 12:13:25 CEST 2018 - jgonzalez@suse.com

- version 3.2.10-1
- create bootstrap repo only if it exist in the server (bsc#1087840)

-------------------------------------------------------------------
Mon Apr 23 09:26:09 CEST 2018 - jgonzalez@suse.com

- version 3.2.9-1
- Enqueue states applied from 'mgractionchains' to avoid failures when
  other states are already running at that time (bsc#1090502)

-------------------------------------------------------------------
Wed Apr 04 12:14:25 CEST 2018 - jgonzalez@suse.com

- version 3.2.8-1
- Fix 'mgractionchains.resume' output when nothing to resume (bsc#1087401)

-------------------------------------------------------------------
Thu Mar 29 01:28:50 CEST 2018 - jgonzalez@suse.com

- version 3.2.7-1
- Do not execute sumautil.get_kernel_live_version when inspecting an image

-------------------------------------------------------------------
Mon Mar 26 09:15:31 CEST 2018 - jgonzalez@suse.com

- version 3.2.6-1
- Provide new Salt module and Reactor to handle Action Chains on Minions
- use dockermod with new salt and user repository/tag option for build
- adapt names for gpg keys which have been changed
- perform docker login before building and inspecting images (bsc#1085635)

-------------------------------------------------------------------
Mon Mar 05 09:09:19 CET 2018 - jgonzalez@suse.com

- version 3.2.5-1
- support SLE15 product family

-------------------------------------------------------------------
Wed Feb 28 10:15:38 CET 2018 - jgonzalez@suse.com

- version 3.2.4-1
- Remove SUSE Manager repositories when deleting salt minions
  (bsc#1079847)
- Fix master tops merging when running salt>=2018

-------------------------------------------------------------------
Mon Feb 05 12:53:28 CET 2018 - jgonzalez@suse.com

- version 3.2.3-1
- Allow scheduling the change of software channel changes as an
  action. The previous channels remain accessible to the registered
  system until the action is executed.

-------------------------------------------------------------------
Fri Feb 02 13:06:31 CET 2018 - jgonzalez@suse.com

- version 3.2.2-1
- compare osmajorrelease in jinja always as integer

-------------------------------------------------------------------
Wed Jan 17 13:31:27 CET 2018 - jgonzalez@suse.com

- version 3.2.1-1
- addition of parameters to package manipulation states to improve
  SUSE Manager performance
- python3 compatibility fixes in modules and states
- Fix cleanup state error when deleting ssh-push minion (bsc#1070161)
- Fix image inspect when entrypoint is used by overwriting it
  (bsc#1070782)

-------------------------------------------------------------------
Tue Dec 12 12:05:09 CET 2017 - jgonzalez@suse.com

- version 3.1.13-1
- fix Salt version detection for patches (bsc#1072350)

-------------------------------------------------------------------
Wed Nov 29 10:15:59 CET 2017 - jgonzalez@suse.com

- version 3.1.12-1
- Fix cleanup state error when deleting ssh-push minion (bsc#1070161)

-------------------------------------------------------------------
Tue Nov 28 15:18:20 CET 2017 - jgonzalez@suse.com

- version 3.1.11-1
- Added state templates for deploying/comparing config channels for Salt
- Fix failing certs state for Tumbleweed (bsc#970630)
- Fix deprecated SLS files to avoid deprecation warnings during highstate (bsc#1041993)
- Support xccdf 1.2 namespace in openscap result file (bsc#1059319)
- ensure correct ordering of patches (bsc#1059801)
- fix create empty top.sls with no-op (bsc#1053038)
- Enabling certificate deployment for Leap 42.3 clients which is
  needed for bootstrapping
- fix Salt version detection for patches (bsc#1072350)

-------------------------------------------------------------------
Thu Sep 14 11:41:56 CEST 2017 - mc@suse.de

- version 3.1.10-1
- Kubernetes runner implementation
- addition of parameters to package manipulation states to improve
  SUSE Manager performance

-------------------------------------------------------------------
Fri Jul 21 12:02:24 CEST 2017 - mc@suse.de

- version 3.1.9-1
- disable gpgcheck for bootstrap repo to work with new libzypp (bsc#1049670)
- Remove spacewalk:* repos when removing traditional stack (bsc#1024267)
- susemanager-sls: fix certs state for Tumbleweed (bsc970630)
- susemanager-sls: fix certs state for Leap 42.2 (bsc970630)
- Make sumautil.get_kernel_live_version accept any kgr output 'active: NUM'
  where NUM > 0 (bsc#1044074)

-------------------------------------------------------------------
Mon Jun 19 16:37:53 CEST 2017 - mc@suse.de

- version 3.1.8-1
- Avoids formula leaking on pillar data (bsc#1044236)

-------------------------------------------------------------------
Mon May 29 15:53:51 CEST 2017 - mc@suse.de

- version 3.1.7-1
- fix yum plugin when installing patches on RHEL6 (bsc#1039294)
- Remove suseRegisterInfo in a separate yum transaction so that
  it's not called by yum plugin (bsc#1038732)
- Refactoring formulas in suma_minion external pillar (bsc#1033825)
- configure mime also during bootstrapping
- add missing file name attr to yum plugin state
- Encode formula to str (bsc#1033825)
- update yum on RedHat like systems
- update basic packages when bootstrapping with salt
- use include instead of state.apply channels to fix salt-ssh issue
  (bsc#1036268)

-------------------------------------------------------------------
Wed May 03 15:55:46 CEST 2017 - michele.bologna@suse.com

- version 3.1.6-1
- Targeting patches instead of packages for non Zypper patch installation
- add certificate state for CAASP
- add certificate state for SLES for SAP (bsc#1031659)

-------------------------------------------------------------------
Mon Apr 03 14:47:46 CEST 2017 - mc@suse.de

- version 3.1.5-1
- patch application pre-download
- pre-download packages scheduled for install

-------------------------------------------------------------------
Fri Mar 31 09:48:52 CEST 2017 - mc@suse.de

- version 3.1.4-1
- Fix mainframesysinfo module to use /proc/sysinfo on SLES11
  (bsc#1025758)
- take care that container and images are removed after inspect
- add name to Bootstrap repo
- Pre-create empty top.sls with no-op (bsc#1017754)
- create a random container name
- Fix pkgset beacon (bsc#1029350)
- set minion own key owner to bootstrap ssh_push_sudo_user
- runner to generate ssh key and execute cmd via proxies
- change ssh bootstrap state to generate and auth keys for
  salt-ssh push with tunnel

-------------------------------------------------------------------
Tue Mar 07 14:55:32 CET 2017 - mc@suse.de

- version 3.1.3-1
- add xccdf result xslt
- move move_minion_uploaded_files runner
- call docker inspect for additional data
- remove the container after inspecting it
- do not call image profile automatically after build
- Add state for image profileupdate
- add SUSE Manager prefix to state ids

-------------------------------------------------------------------
Tue Feb 07 15:12:30 CET 2017 - michele.bologna@suse.com

- version 3.1.2-1
- Configure mine.update to submit a job return event (bsc#1022735)
- Disable spacewalksd and spacewalk-update-status when switching to salt
  registration (bsc#1020902)
- Fix timezone handling for rpm installtime (bsc#1017078)
- Push build images into registry
- Configure a Docker build host
- Salt version update

-------------------------------------------------------------------
Wed Jan 11 16:57:58 CET 2017 - michele.bologna@suse.com

- version 3.1.1-1
- Version bump to 3.1

-------------------------------------------------------------------
Fri Dec 16 12:14:52 CET 2016 - michele.bologna@suse.com

- version 0.1.18-1
- Rename 'master' pillar to 'mgr_server'
- Add tunneling to salt-ssh support
- Provide SUMA static pillar data for unregistered minions (bsc#1015122)
- implement fetching kernel live version as module (FATE#319519)
- Removing '/usr/share/susemanager/pillar' path
- Retrieving SUMA static pillar data from ext_pillar (bsc#1010674)
- Bugfix: Prevent salt-master ERROR messages if formulas files are missing
  (bsc#1009004)
- fallback to major os release version for cert names (bsc#1009749)

-------------------------------------------------------------------
Mon Nov 07 11:37:52 CET 2016 - michele.bologna@suse.com

- version 0.1.17-1
- Sync custom modules,grains,beacons always before pkg and hw profileupdate
  (bsc#1004725)
- Write distupgrade state for SP migration via salt
- New location of the salt-ssh key/cert pair. The previous location wasn't
  writable by the salt user

-------------------------------------------------------------------
Thu Oct 13 12:50:28 CEST 2016 - mc@suse.de

- version 0.1.16-1
- Only normalize lists (bsc#1004456)
- Call normalize() before add_scsi_info() (bsc#1004456)

-------------------------------------------------------------------
Thu Oct 06 14:51:43 CEST 2016 - mc@suse.de

- version 0.1.15-1
- Fixed bug with numbers in FormulaForm and improved ext_pillar script
- Added formula directories and formulas.sls to setup script
- External pillar script now also includes formula pillars
- Rename symlinks according to changed 'os' grain for Expanded Support
- Adding certs states for RHEL minion based on SLES-ES
- Rename udevdb scsi info json key
- Add support for mapping mainframe sysinfo
- Implement isX86() in jinja more correctly
- Initial support for querying and saving DMI info
- Add support for mapping the devices
- Actually handle incoming hardware details
- Initial version of the hardware.profileupdate sls
- Added pkgset beacon support in susemanager yum plugin
- trust also RES GPG key on all RedHat minions
- trust GPG keys for SUSE Manager Tools channel on RES
- configure bootstrap repository for RES
- Always enable salt-minion service while bootstrapping (bsc#990202)
- CentOS cert state symlinks and fixes
- states for installing certificate on redhat minions
- pkg.list_products only on Suse
- yum plugin to add jwt token as http header
- Generate SLE 12 bootstrap repo path correctly (bsc#994578)
- Merging top.sls files in base env (bsc#986770)
- Watch files instead of require

-------------------------------------------------------------------
Mon Jul 18 14:23:32 CEST 2016 - jrenner@suse.com

- version 0.1.14-1
- Initial version of the bootstrap sls file
- update trust store when multiple certs in one file are available on SLE11
- update ca certificates only when they have changed
- assume no pillar data if the yml file for the minion does not exist
  (bsc#980354)
- Add distributable pkgset beacon for RPM database notifications

-------------------------------------------------------------------
Tue May 24 16:04:20 CEST 2016 - kwalter@suse.com

- version 0.1.13-1
- require refresh channels before pkg states (bsc#975424)
- use pillar and static states to install/remove packages (bsc#975424)

-------------------------------------------------------------------
Tue Apr 12 17:15:01 CEST 2016 - mc@suse.de

- version 0.1.12-1
- Add external pillar minion data resolver (bsc#974853)
- Add readme about ext_pillars
- remove pillar top.sls (bsc#974853)

-------------------------------------------------------------------
Wed Apr 06 08:46:20 CEST 2016 - mc@suse.de

- version 0.1.11-1
- generate include only if group_ids not empty
- use state names in custom_groups (bsc#973452)
- rename pillar group_id to group_ids
- Fix generating blank repositories because hitting salt file list cache
  (bsc#971004)
- package pillar/top.sls (bsc#973569)
- pre require coreutils to create the cert symlink in post (bsc#972160)
- disable local repositories on registration (bnc#971788)

-------------------------------------------------------------------
Mon Mar 21 17:38:33 CET 2016 - mc@suse.de

- version 0.1.10-1
- remove unused ext_pillar
- ignore missing .sls to include in certs/init.sls
- ignore packages_{machine_id}.sls if it's missing
- ignore missing pillar files at minion level
- ignore missing sls or pillars in custom_XXX/init.sls
  (bnc#970461, bnc#970316)
- Include minion custom_<machine_id>.sls only if it exists (#bnc970461)
- Ignore missing org custom state (#bnc970461)
- refactor in python (#bnc970316) (#bnc970461)

-------------------------------------------------------------------
Wed Mar 09 11:29:45 CET 2016 - mc@suse.de

- version 0.1.9-1
- include org and groups separately in top.sls
- refresh pillar on remove from group
- initial suma groups external pillar

-------------------------------------------------------------------
Wed Mar 02 12:09:13 CET 2016 - mc@suse.de

- version 0.1.8-1
- rename tables

-------------------------------------------------------------------
Tue Jan 26 14:07:41 CET 2016 - mc@suse.de

- version 0.1.7-1
- cleanup python code according to PR review
- reworked sumautil network utils to be more pythonic
- remove commented code
- get network if modules, checkstyle cleanup
- get minion primary ips

-------------------------------------------------------------------
Sat Jan 16 11:38:17 CET 2016 - mc@suse.de

- version 0.1.6-1
- custom grain for total num of cpus

-------------------------------------------------------------------
Thu Jan 14 13:30:59 CET 2016 - mc@suse.de

- version 0.1.5-1
- Port client python HW handling to server side java
- CPU socket count: try also lscpu and dmidecode

-------------------------------------------------------------------
Tue Jan 05 15:55:57 CET 2016 - mc@suse.de

- version 0.1.4-1
- Fill General and DMI hw info on minion registration

-------------------------------------------------------------------
Wed Dec 16 11:28:21 CET 2015 - mc@suse.de

- version 0.1.3-1
- Add static sls for package management

-------------------------------------------------------------------
Mon Nov 30 11:15:47 CET 2015 - mc@suse.de

- version 0.1.2-1
- force link creation
- use osfullname instead of os
- Cover sles12 machines reporing os grain SUSE
- Add support for deploying certificates to SLES11 minions

-------------------------------------------------------------------
Tue Nov 17 09:35:38 CET 2015 - jrenner@suse.com

- version 0.1.1-1
- Initial package release
070701000000C4000081B40000000000000000000000015EA152C4000013D1000000000000000000000000000000000000002500000000susemanager-sls/susemanager-sls.spec  #
# spec file for package susemanager-sls
#
# Copyright (c) 2019 SUSE LINUX GmbH, Nuernberg, Germany.
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
# upon. The license for this file, and modifications and additions to the
# file, is the same license as for the pristine package itself (unless the
# license for the pristine package is not an Open Source License, in which
# case the license is the MIT License). An "Open Source License" is a
# license that conforms to the Open Source Definition (Version 1.9)
# published by the Open Source Initiative.

# Please submit bugfixes or comments via https://bugs.opensuse.org/
#

%if 0%{?suse_version} > 1320
# SLE15 builds on Python 3
%global build_py3   1
%endif

Name:           susemanager-sls
Version:        4.0.25
Release:        1
Summary:        Static Salt state files for SUSE Manager
License:        GPL-2.0-only
Group:          Applications/Internet
Source:         %{name}-%{version}.tar.gz
Requires(pre):  coreutils
Requires:       susemanager-build-keys-web >= 12.0.1
%if 0%{?build_py3}
BuildRequires:  python3-pytest
BuildRequires:  python3-mock
BuildRequires:  python3-salt
%else
BuildRequires:  python-pytest
BuildRequires:  python-mock
BuildRequires:  python-salt
%endif
BuildRoot:      %{_tmppath}/%{name}-%{version}-build
BuildArch:      noarch

%description
Static Salt state files for SUSE Manager, where generic operations are
provided for the integration between infrastructure components.

%prep
%setup -q

%build

%install
mkdir -p %{buildroot}/usr/share/susemanager/salt/_grains
mkdir -p %{buildroot}/usr/share/susemanager/salt/_beacons
mkdir -p %{buildroot}/usr/share/susemanager/salt/_modules
mkdir -p %{buildroot}/usr/share/susemanager/salt/_states
mkdir -p %{buildroot}/usr/share/susemanager/modules/pillar
mkdir -p %{buildroot}/usr/share/susemanager/modules/tops
mkdir -p %{buildroot}/usr/share/susemanager/modules/runners
mkdir -p %{buildroot}/usr/share/susemanager/modules/engines
mkdir -p %{buildroot}/usr/share/susemanager/pillar_data
mkdir -p %{buildroot}/usr/share/susemanager/formulas
mkdir -p %{buildroot}/usr/share/susemanager/formulas/metadata
mkdir -p %{buildroot}/usr/share/susemanager/reactor
mkdir -p %{buildroot}/usr/share/susemanager/scap
mkdir -p %{buildroot}/srv/formula_metadata
cp -R salt/* %{buildroot}/usr/share/susemanager/salt
cp -R modules/pillar/* %{buildroot}/usr/share/susemanager/modules/pillar
cp -R modules/tops/* %{buildroot}/usr/share/susemanager/modules/tops
cp -R modules/runners/* %{buildroot}/usr/share/susemanager/modules/runners
cp -R modules/engines/* %{buildroot}/usr/share/susemanager/modules/engines
cp -R pillar_data/* %{buildroot}/usr/share/susemanager/pillar_data
cp -R formulas/* %{buildroot}/usr/share/susemanager/formulas
cp -R formula_metadata/* %{buildroot}/srv/formula_metadata
cp -R reactor/* %{buildroot}/usr/share/susemanager/reactor
cp -R scap/* %{buildroot}/usr/share/susemanager/scap

# Manually install Python part to already prepared structure
cp src/beacons/pkgset.py %{buildroot}/usr/share/susemanager/salt/_beacons
cp src/beacons/virtpoller.py %{buildroot}/usr/share/susemanager/salt/_beacons
cp src/grains/cpuinfo.py %{buildroot}/usr/share/susemanager/salt/_grains/
cp src/grains/public_cloud.py %{buildroot}/usr/share/susemanager/salt/_grains/
cp src/modules/sumautil.py %{buildroot}/usr/share/susemanager/salt/_modules
cp src/modules/mainframesysinfo.py %{buildroot}/usr/share/susemanager/salt/_modules
cp src/modules/udevdb.py %{buildroot}/usr/share/susemanager/salt/_modules
cp src/modules/mgractionchains.py %{buildroot}/usr/share/susemanager/salt/_modules
cp src/modules/kiwi_info.py %{buildroot}/usr/share/susemanager/salt/_modules
cp src/modules/kiwi_source.py %{buildroot}/usr/share/susemanager/salt/_modules
cp src/states/product.py %{buildroot}/usr/share/susemanager/salt/_states

%check
cd test
py.test test_pillar_suma_minion.py
cd ../src/tests
py.test

%post
# HACK! Create broken link when it will be replaces with the real file
ln -sf /srv/www/htdocs/pub/RHN-ORG-TRUSTED-SSL-CERT \
   /usr/share/susemanager/salt/certs/RHN-ORG-TRUSTED-SSL-CERT 2>&1 ||:
# Pre-create top.sls to suppress empty/absent top.sls warning/error (bsc#1017754)
USERLAND="/srv/salt"
TOP="$USERLAND/top.sls"
if [ -d "$USERLAND" ]; then
    if [ ! -f "$TOP" ]; then
	cat <<EOF >> $TOP
# This only calls no-op statement from
# /usr/share/susemanager/salt/util/noop.sls state
# Feel free to change it.

base:
  '*':
    - util.noop
EOF
    fi
fi

%files
%defattr(-,root,root)
%dir /usr/share/susemanager
/usr/share/susemanager/salt
/usr/share/susemanager/pillar_data
/usr/share/susemanager/modules
/usr/share/susemanager/modules/pillar
/usr/share/susemanager/modules/tops
/usr/share/susemanager/modules/runners
/usr/share/susemanager/modules/engines
/usr/share/susemanager/formulas
/usr/share/susemanager/reactor
/usr/share/susemanager/scap
/srv/formula_metadata
%ghost /usr/share/susemanager/salt/certs/RHN-ORG-TRUSTED-SSL-CERT

%changelog
   070701000000C5000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001500000000susemanager-sls/test  070701000000C6000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000001A00000000susemanager-sls/test/data 070701000000C7000081B40000000000000000000000015EA152C4000000B6000000000000000000000000000000000000002D00000000susemanager-sls/test/data/formula_order.json  ["branch-network","cpu-mitigations","dhcpd","grafana","image-synchronize","locale","prometheus","prometheus-exporters","pxe","saltboot","tftpd","virtualization-host","vsftpd","bind"]  070701000000C8000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000002300000000susemanager-sls/test/data/formulas    070701000000C9000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000002C00000000susemanager-sls/test/data/formulas/metadata   070701000000CA000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000003100000000susemanager-sls/test/data/formulas/metadata/bind  070701000000CB000081B40000000000000000000000015EA152C400000A23000000000000000000000000000000000000003A00000000susemanager-sls/test/data/formulas/metadata/bind/form.yml bind:
  $type: hidden-group

  config:
    $type: group
    options:
      $type: edit-group
      $optional: True
      $prototype:
        $type: text
        $key:
          $type: text
          $name: Option
    include_forwarders:
          $type: boolean
          $default: false

  configured_zones:
    $type: edit-group
    $minItems: 1
    $itemName: Zone ${i}
    $prototype:
      $type: group
      $key:
        $type: text
        $name: Name
      type:
        $type:  select
        $values: ["master", "slave"]
        $default: master
      notify:
        $type: boolean
        $default: False

  available_zones:
    $type: edit-group
    $minItems: 1
    $itemName: Zone ${i}
    $prototype:
      $type: group
      $key:
        $type: text
        $name: Name
      file:
        $type: text
      soa:
        $name: SOA
        $type: group
        ns:
          $name: NS
          $type: text
          $placeholder: ns@zone
          $ifEmpty: ns
        contact:
          $type: text
          $placeholder: admin@domain
          $ifEmpty: root@localhost
        serial:
          $default:  auto
          $ifEmpty:  auto
        class:
          $default:  IN
        refresh:
          $default:  8600
          $type: number
        retry:
          $default:  900
          $type: number
        expiry:
          $default:  86000
          $type: number
        nxdomain:
          $name: NXDOMAIN
          $default:  500
          $type: number
        ttl:
          $name: TTL
          $default:  8600
          $type: number
      records:
        $type: group
        A: 
          $type: edit-group
          $optional: true
          $minItems: 0
          $prototype:
            $key:
              $type: text
              $name: Hostname
            $type: text
            $name: IP address
        NS:
          $name: NS
          $type: group
          $optional:  true
          '@':
             $type: edit-group
             $minItems: 0
             $prototype:
               $type: text
        CNAME:
          $name: CNAME
          $type: edit-group
          $optional:  true
          $minItems: 0
          $prototype:
            $key:
              $type: text
              $name: Alias
            $type: text
            $name: Hostname
      generate_reverse: 
        $type: group
        $optional:  true
        net:
          $name: Network
          $optional:  true
        for_zones:
          $type: edit-group
          $optional:  true
          $minItems: 0
          $prototype:
            $type: text
 070701000000CC000081B40000000000000000000000015EA152C400000069000000000000000000000000000000000000003E00000000susemanager-sls/test/data/formulas/metadata/bind/metadata.yml description:
  Settings for bind nameserver
group: general_system_configuration
after:
  - branch-network   070701000000CD000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000003B00000000susemanager-sls/test/data/formulas/metadata/branch-network    070701000000CE000081B40000000000000000000000015EA152C40000093C000000000000000000000000000000000000004400000000susemanager-sls/test/data/formulas/metadata/branch-network/form.yml   branch_network:
  $type: hidden-group
  dedicated_NIC:
    $type: boolean
    $default: True

  nic:
    $default: eth1
    $visibleIf: .dedicated_NIC == true
  ip:
    $default: 192.168.128.1
    $visibleIf: .dedicated_NIC == true
  netmask:
    $default: 255.255.255.0
    $visibleIf: .dedicated_NIC == true

  configure_firewall:
    $type: boolean
    $default: true
    $help: Uncheck to configure firewall manually.

  firewall:
    $type: group
    $visibleIf: .configure_firewall == true
    enable_route:
      $type: boolean
      $default: True
      $visibleIf: ..dedicated_NIC == true
    enable_NAT:
      $type: boolean
      $default: True
      $visibleIf: ..dedicated_NIC == true
    enable_SLAAC_with_routing:
      $type: boolean
      $default: False
      $visibleIf: .enable_NAT == true
      $name: Force enable IPv6 SLAAC together with forwarding
      $help: Check to enable IPv6 autoconfiguration (SLAAC) even when Branch act as a router.
    open_dhcp_port:
      $visibleIf: ..dedicated_NIC == false
      $type: boolean
      $default: True
    open_dns_port:
      $visibleIf: ..dedicated_NIC == false
      $type: boolean
      $default: True
    open_tftp_port:
      $visibleIf: ..dedicated_NIC == false
      $type: boolean
      $default: True
    open_ftp_port:
      $visibleIf: ..dedicated_NIC == false
      $type: boolean
      $default: True
    open_http_port:
      $visibleIf: ..dedicated_NIC == false
      $type: boolean
      $default: True
    open_https_port:
      $visibleIf: ..dedicated_NIC == false
      $type: boolean
      $default: True
    open_salt_ports:
      $visibleIf: ..dedicated_NIC == false
      $type: boolean
      $default: True
    open_ssh_port:
      $type: boolean
      $default: True
    open_xmpp_server_port:
      $type: boolean
      $default: True
    open_xmpp_client_port:
      $type: boolean
      $default: True

  forwarder:
    $type: select
    $values:
      - resolver
      - bind
      - dnsmasq
    $default: bind

  forwarder_fallback:
    $type: boolean
    $default: True

  srv_directory:
    $name:  'server directory'
    $type: text
    $default: '/srv/saltboot'
  srv_directory_user:
    $name: 'server directory user'
    $type: text
    $default: 'saltboot'
  srv_directory_group:
    $name: 'server directory group'
    $type: text
    $default: 'saltboot'
070701000000CF000081B40000000000000000000000015EA152C40000005C000000000000000000000000000000000000004800000000susemanager-sls/test/data/formulas/metadata/branch-network/metadata.yml   description:
  Configuration of Branch Server proxy networks
group: SUSE_manager_for_retail
070701000000D0000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000003C00000000susemanager-sls/test/data/formulas/metadata/cpu-mitigations   070701000000D1000081B40000000000000000000000015EA152C4000000BA000000000000000000000000000000000000004500000000susemanager-sls/test/data/formulas/metadata/cpu-mitigations/form.yml  mitigations:
  $type: group

  name:
    $type: select
    $values: ["Auto",
              "Auto + No SMT",
              "Off",
              "Manual"
             ]
    $default: Auto
  070701000000D2000081B40000000000000000000000015EA152C400000063000000000000000000000000000000000000004900000000susemanager-sls/test/data/formulas/metadata/cpu-mitigations/metadata.yml  description:
  Settings for kernel options for performance/security.
group: security_configuration
 070701000000D3000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000003200000000susemanager-sls/test/data/formulas/metadata/dhcpd 070701000000D4000081B40000000000000000000000015EA152C400001284000000000000000000000000000000000000003B00000000susemanager-sls/test/data/formulas/metadata/dhcpd/form.yml    dhcpd:
  $type: namespace
  domain_name:
    $placeholder: Enter domain name for managed LAN
  domain_name_servers:
    $type: edit-group
    $minItems: 1
    $prototype:
      $type: text
  listen_interfaces:
    $type: edit-group
    $minItems: 1
    $prototype:
      $type: text
    $help: List of interfaces to listen on
    $default:
    - eth1
  authoritative:
    $type: boolean
    $default: True
  max_lease_time:
    $default: 20001
    $type: number
  default_lease_time:
    $default: 20000
    $type: number
  subnets:
    $type: edit-group
    $minItems: 1
    $name: Network Configuration (subnet)
    $itemName: Network ${i}
    $prototype:
        $type: group
        $key:
          $type: text
          $name: Network IP
          $default: 192.168.1.0
        netmask:
          $type: text
          $default: 255.255.255.0
        domain_name:
          $type: text
          $optional: true
        comment:
          $type: text
          $optional: true
        range:
          $type: edit-group
          $name: Dynamic IP Range
          $minItems: 2
          $maxItems: 2
          $prototype:
            $type: text
          $default:
          - 192.168.1.51
          - 192.168.1.151
        broadcast_address:
          $type: text
          $default: 192.168.1.255
        routers:
          $type: edit-group
          $minItems: 1
          $prototype:
            $type: text
          $default:
          - 192.168.1.1
        next_server:
          $type: text
          $default: 192.168.1.1
          $help: IP address or hostname of the server from which the initial boot file (specified in the filename statement) is to be loaded
          $optional: true
        filename:
          $type: text
          $visibleIf: .next_server != ''
          $default: boot/pxelinux.0
          $help: Specify the name of the initial boot file which is to be loaded by a client
          $optional: true
        filename_efi:
          $type: text
          $visibleIf: .next_server != ''
          $default: boot/grub.efi
          $help: Specify the name of the initial boot file which is to be loaded by a client in EFI mode
          $optional: true
        hosts:
          $type: edit-group
          $minItems: 0
          $itemName: Host ${i}
          $name: Hosts with Static IP Addresses (with Defaults from Subnet)
          $optional: true
          $prototype:
            $key:
                $type: text
                $name: Hostname
            fixed_address:
                $type: text
                $optional: true
                $name: IP Address
            hardware:
                $type: text
                $name: Hardware Type and Address
                $placeholder: Enter hardware-type hardware-address (e.g. "ethernet AA:BB:CC:DD:EE:FF")
                $help: Hardware Identifier - ethernet prefix is mandatory
            next_server:
                $type: text
                $default:
                $help: IP address or hostname of the server from which the initial boot file (specified in the filename statement) is to be loaded
                $optional: true
            filename:
                $type: text
                $visibleIf: .next_server != ''
                $default:
                $help: Specify the name of the initial boot file which is to be loaded by a client
                $optional: true
            filename_efi:
                $type: text
                $visibleIf: .next_server != ''
                $default:
                $help: Specify the name of the initial boot file which is to be loaded by a client in EFI mode
                $optional: true
            comment:
                $type: text
  hosts:
    $type: edit-group
    $minItems: 0
    $itemName: Host ${i}
    $name: Hosts with static IP addresses (with global defaults)
    $optional: true
    $prototype:
      $key:
        $type: text
        $name: Hostname
      fixed_address:
        $type: text
        $optional: true
        $name: IP address
      hardware:
        $type: text
        $name: Hardware Type and Address
        $placeholder: Enter hardware-type hardware-address (e.g. "ethernet AA:BB:CC:DD:EE:FF")
        $help: Hardware Identifier - ethernet prefix is mandatory
      next_server:
        $type: text
        $default:
        $help: IP address or hostname of the server from which the initial boot file (specified in the filename statement) is to be loaded
        $optional: true
      filename:
        $type: text
        $visibleIf: .next_server != ''
        $default:
        $help: Specify the name of the initial boot file which is to be loaded by a client
        $optional: true
      comment:
        $type: text
070701000000D5000081B40000000000000000000000015EA152C400000065000000000000000000000000000000000000003F00000000susemanager-sls/test/data/formulas/metadata/dhcpd/metadata.yml    description:
  Settings for DHCP server
group: general_system_configuration
after:
  - branch-network   070701000000D6000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000003400000000susemanager-sls/test/data/formulas/metadata/grafana   070701000000D7000081B40000000000000000000000015EA152C40000074B000000000000000000000000000000000000003D00000000susemanager-sls/test/data/formulas/metadata/grafana/form.yml  grafana:
  $type: namespace

  enabled:
    $type: boolean
    $default: True
    $help: disasbled grafana

  admin_user:
    $type: text
    $name: Default admin user
    $required: true
    $disabled: "!formValues.grafana.enabled"
    
  admin_pass:
    $type: password
    $name: Default admin password  
    $required: true
    $disabled: "!formValues.grafana.enabled"

  datasources:
    $type: group
    $disabled: "!formValues.grafana.enabled"
    $help: Configure the data sources used by Grafana.

    prometheus:
      $type: edit-group
      $minItems: 1
      $name: Prometheus
      $help: Configure Prometheus data sources.
      $itemName: Prometheus data source ${i}
      $prototype:
        $type: group
        $disabled: "!formValues.grafana.enabled"
        $key:
          $type: text
          $name: Datasource name
          $default: Prometheus
          $help: Data source name
        url:
          $type: url
          $default: http://localhost:9080
          $required: true
          $name: Prometheus URL
          $help: URL of a Prometheus instance

  dashboards:
    $type: group
    $disabled: "!formValues.grafana.enabled"
    $help: Dashboards to install.

    add_uyuni_dashboard:
      $type: boolean
      $name: Uyuni server dashboard
      $help: Add dashboard for monitoring an Uyuni server
      $default: True

    add_uyuni_clients_dashboard:
      $type: boolean
      $name: Uyuni clients dashboard
      $help: Add dashboard for monitoring Uyuni clients
      $default: True

    add_postgresql_dasboard:
      $type: boolean
      $name: PostgreSQL dashboard
      $help: Add dashboard for monitoring a PostgreSQL database
      $default: True

    add_apache_dashboard:
      $type: boolean
      $name: Apache HTTPD dashboard
      $help: Add dashboard for monitoring an Apache HTTPD server
      $default: True
 070701000000D8000081B40000000000000000000000015EA152C40000003F000000000000000000000000000000000000004100000000susemanager-sls/test/data/formulas/metadata/grafana/metadata.yml  description:
  Enable and configure Grafana.
group: monitoring
 070701000000D9000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000003E00000000susemanager-sls/test/data/formulas/metadata/image-synchronize 070701000000DA000081B40000000000000000000000015EA152C400000206000000000000000000000000000000000000004700000000susemanager-sls/test/data/formulas/metadata/image-synchronize/form.yml    image-synchronize:
    $type: hidden-group
    in_highstate:
        $name: Include Image Synchronization in Highstate
        $type: boolean
        $default: false

    whitelist:
        $type: edit-group
        $name: Synchronize only the listed images
        $minItems: 0
        $prototype:
            $type: text
            $help: Image name (without version)

    default_boot_image:
        $type: text
        $name: Default boot image
        $help: Default boot image used for first boot of a terminal
  070701000000DB000081B40000000000000000000000015EA152C400000051000000000000000000000000000000000000004B00000000susemanager-sls/test/data/formulas/metadata/image-synchronize/metadata.yml    description:
  Settings for image synchronization
group: SUSE_manager_for_retail
   070701000000DC000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000003300000000susemanager-sls/test/data/formulas/metadata/locale    070701000000DD000081B40000000000000000000000015EA152C400001537000000000000000000000000000000000000003C00000000susemanager-sls/test/data/formulas/metadata/locale/form.yml   # This file is part of locale-formula.
#
# Foobar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Foobar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Foobar.  If not, see <http://www.gnu.org/licenses/>.

timezone:
  $type: group

  name:
    $type: select
    $values: ["CET",
              "CST6CDT",
              "EET",
              "EST",
              "EST5EDT",
              "GMT",
              "GMT+0",
              "GMT-0",
              "GMT0",
              "Greenwich",
              "HST",
              "MET",
              "MST",
              "MST7MDT",
              "NZ",
              "NZ-CHAT",
              "Navajo",
              "PST8PDT",
              "UCT",
              "UTC",
              "Universal",
              "W-SU",
              "WET",
              "Zulu",
              "Etc/GMT+1",
              "Etc/GMT+2",
              "Etc/GMT+3",
              "Etc/GMT+4",
              "Etc/GMT+5",
              "Etc/GMT+6",
              "Etc/GMT+7",
              "Etc/GMT+8",
              "Etc/GMT+9",
              "Etc/GMT+10",
              "Etc/GMT+11",
              "Etc/GMT+12",
              "Etc/GMT-1",
              "Etc/GMT-2",
              "Etc/GMT-3",
              "Etc/GMT-4",
              "Etc/GMT-5",
              "Etc/GMT-6",
              "Etc/GMT-7",
              "Etc/GMT-8",
              "Etc/GMT-9",
              "Etc/GMT-10",
              "Etc/GMT-11",
              "Etc/GMT-12",
              "Etc/GMT-13",
              "Etc/GMT-14",
              "Etc/GMT",
              "Etc/GMT+0",
              "Etc/GMT-0",
              "Etc/GMT0",
              "Etc/Greenwich",
              "Etc/UCT",
              "Etc/UTC",
              "Etc/Universal",
              "Etc/Zulu" 
              ]
    $default: CET

  hardware_clock_set_to_utc:
    $type: boolean
    $default: True

keyboard_and_language:
  $type: group

  language:
    $type: select
    $values: ["Afrikaans",
              "Arabic",
              "Asturian",
              "Bulgarian",
              "Bengali",
              "Bosnian",
              "Catalan",
              "Czech",
              "Welsh",
              "Danish",
              "German",
              "Greek",
              "English (UK)",
              "English (US)",
              "Spanish",
              "Estonian",
              "Finnish",
              "French",
              "Galician",
              "Gujarati",
              "Hebrew",
              "Hindi",
              "Croatian",
              "Hungarian",
              "Indonesian",
              "Italian",
              "Japanese",
              "Georgian",
              "Khmer",
              "Korean",
              "Lithuanian",
              "Macedonian",
              "Marathi",
              "Norwegian",
              "Dutch",
              "Nynorsk",
              "Punjabi",
              "Polish",
              "Portuguese (Brazilian)",
              "Portuguese",
              "Romanian",
              "Russian",
              "Sinhala",
              "Slovak",
              "Slovenian",
              "Serbian",
              "Swedish",
              "Tamil",
              "Tajik",
              "Thai",
              "Turkish",
              "Ukrainian",
              "Vietnamese",
              "Walloon",
              "Xhosa",
              "Simplified Chinese",
              "Traditional Chinese",
              "Zulu"
              ]
    $default: English (US)

  keyboard_layout:
    $type: select
    $values: ["Arabic",
              "Belgian",
              "Canadian (Multilingual)",
              "Croatian",
              "Czech",
              "Czech (qwerty)",
              "Danish",
              "Dutch",
              "Dvorak",
              "English (UK)",
              "English (US)",
              "Estonian",
              "Finnish",
              "French",
              "French (Canada)",
              "French (Switzerland)",
              "German",
              "German (Switzerland)",
              "German (with deadkeys)",
              "Greek",
              "Hungarian",
              "Icelandic",
              "Italian",
              "Japanese",
              "Khmer",
              "Korean",
              "Lithuanian",
              "Norwegian",
              "Polish",
              "Portuguese",
              "Portuguese (Brazil)",
              "Portuguese (Brazil  US accents)",
              "Romanian",
              "Russian",
              "Serbian",
              "Simplified Chinese",
              "Slovak",
              "Slovak (qwerty)",
              "Slovene",
              "Spanish",
              "Spanish (Asturian variant)",
              "Spanish (CP 850)",
              "Spanish (Latin America)",
              "Swedish",
              "Tajik",
              "Traditional Chinese",
              "Turkish",
              "Ukrainian",
              "US International" 
              ]
    $default: English (US)
 070701000000DE000081B40000000000000000000000015EA152C400000071000000000000000000000000000000000000004000000000susemanager-sls/test/data/formulas/metadata/locale/metadata.yml   description:
  Settings for language, keyboard, and timezone
group: general_system_configuration
after:
  - users   070701000000DF000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000004100000000susemanager-sls/test/data/formulas/metadata/prometheus-exporters  070701000000E0000081B40000000000000000000000015EA152C4000003FF000000000000000000000000000000000000004A00000000susemanager-sls/test/data/formulas/metadata/prometheus-exporters/form.yml node_exporter:
  $type: group
  $help: Prometheus exporter for hardware and OS metrics.

  enabled:
    $type: boolean
    $default: True

  args:
    $name: "Arguments"
    $type: text
    $default: --web.listen-address=":9100"
    $help: Please refer to the documentation for available options.

apache_exporter:
  $type: group
  $help: Prometheus exporter for apache mod_status statistics.

  enabled:
    $type: boolean
    $default: False

  args:
    $name: "Arguments"
    $type: text
    $default: --telemetry.address=":9117"
    $help: Please refer to the documentation for available options.

postgres_exporter:
  $type: group
  $help: Prometheus exporter for PostgreSQL server metrics.

  enabled:
    $type: boolean
    $default: False

  data_source_name:
    $type: text
    $default: postgresql://user:passwd@localhost:5432/database?sslmode=disable

  args:
    $name: "Arguments"
    $type: text
    $default: --web.listen-address=":9187"
    $help: Please refer to the documentation for available options.
 070701000000E1000081B40000000000000000000000015EA152C400000061000000000000000000000000000000000000004E00000000susemanager-sls/test/data/formulas/metadata/prometheus-exporters/metadata.yml description:
  Enable and configure Prometheus exporters for managed systems.
group: monitoring

   070701000000E2000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000003700000000susemanager-sls/test/data/formulas/metadata/prometheus    070701000000E3000081B40000000000000000000000015EA152C40000093C000000000000000000000000000000000000004000000000susemanager-sls/test/data/formulas/metadata/prometheus/form.yml   prometheus:
  $type: namespace

  enabled:
    $type: boolean
    $default: True

  scrape_interval:
    $type: number
    $name: Scrape interval (s)
    $default: 15
    $disabled: "!prometheus.enabled"
    $required: true

  evaluation_interval:
    $type: number
    $name: Evaluation interval (s)
    $default: 15
    $disabled: "!prometheus.enabled"
    $required: true

  mgr:
    $type: group
    $name: Uyuni Server
    $disabled: "!prometheus.enabled"

    monitor_server:
      $name: Monitor this server
      $type: boolean
      $default: True

    autodiscover_clients:
      $name: Autodiscover clients 
      $type: boolean
      $default: True

    sd_username:
      $type: text
      $name: Username
      $help: Username for auto-discovering clients
      $default: admin
      $visibleIf: .autodiscover_clients == true
      $required: true

    sd_password:
      $type: password
      $name: Password
      $help: Password for auto-discovering clients
      $visibleIf: .autodiscover_clients == true
      $required: true

  alerting:
    $type: group
    $disabled: "!prometheus.enabled"

    alertmanager_service:
      $type: boolean
      $default: True
      $name: Enable local Alertmanager service

    use_local_alertmanager:
      $type: boolean
      $name: Use local Alertmanager
      $help: Use local Alertmanager for this Prometheus instance
      $visibleIf: .alertmanager_service == true
      $default: True

    alertmanagers:
      $type: edit-group
      $minItems: 0
      $itemName: Target ${i}
      $prototype:
        $type: group 
        $key:
          $type: text 
          $name: "IP Address : Port"
          $default: localhost:9093
          $match: ".*\\:\\d{1,5}"

    rule_files:
      $type: edit-group
      $minItems: 0
      $prototype:
        $type: text
        $default: /etc/prometheus/my-rules.yml
        $required: true

  scrape_configs:
    $type: edit-group
    $name: User defined scrape configurations
    $minItems: 0
    $itemName: File-based service discovery ${i}
    $disabled: "!prometheus.enabled"
    $prototype:
      $type: group 
      $key:
        $type: text 
        $name: "Job name"
      files:
        $type: edit-group
        $minItems: 1
        $prototype:
          $type: text
          $default: /etc/prometheus/my-scrape-config.yml
          $required: true


070701000000E4000081B40000000000000000000000015EA152C400000042000000000000000000000000000000000000004400000000susemanager-sls/test/data/formulas/metadata/prometheus/metadata.yml   description:
  Enable and configure Prometheus
group: monitoring

  070701000000E5000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000003000000000susemanager-sls/test/data/formulas/metadata/pxe   070701000000E6000081B40000000000000000000000015EA152C4000002B4000000000000000000000000000000000000003900000000susemanager-sls/test/data/formulas/metadata/pxe/form.yml  pxe:
  $type: hidden-group

  kernel_name:
     $name: 'Kernel Filename'
     $type: text
     $default: 'linux'

  initrd_name:
     $name: 'Initrd Filename'
     $type: text
     $default: 'initrd.gz'

  default_kernel_parameters:
     $name: 'Kernel Command Line Parameters'
     $type: text
     $default: 'panic=60 ramdisk_size=710000 ramdisk_blocksize=4096 vga=0x317 splash=silent kiwidebug=0'

  pxe_root_directory:
     $name:  'PXE Root Directory'
     $type: text
     $default: '/srv/saltboot'

  branch_id:
     $name: 'Branch Id'
     $type: text
     $placeholder: 'Enter unique Branch server ID (e.g. "B0001")'
     $help: 'Branch server ID is used as a prefix in terminal ID'
070701000000E7000081B40000000000000000000000015EA152C400000067000000000000000000000000000000000000003D00000000susemanager-sls/test/data/formulas/metadata/pxe/metadata.yml  description:
  PXE settings for branch server
group: SUSE_manager_for_retail
after:
  - branch-network
 070701000000E8000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000003500000000susemanager-sls/test/data/formulas/metadata/saltboot  070701000000E9000081B40000000000000000000000015EA152C40000157C000000000000000000000000000000000000003E00000000susemanager-sls/test/data/formulas/metadata/saltboot/form.yml partitioning:
    $name: Disk Partitioning
    $type: edit-group
    $itemName: Disk ${i}
    $minItems: 1
    $prototype:
        $type: group
        $key:
            $type: text
            $name: Disk Symbolic ID
            $placeholder: Enter disk symbolic ID (e.g. disk1, disk2, md0 for RAID devices)
            $help: Disk Symbolic ID is used together with Partition Symbolic ID for RAID completion.
        type:
            $type: select
            $name: Device Type
            $values:
              - RAID
              - DISK
            $default: DISK
        device:
            $type: text
            $visibleIf: .type == DISK
            $name: Disk Device
            $placeholder: Enter target disk device name (e.g. /dev/sda)
            $optional: true
        level:
            $visibleIf: .type == RAID
            $type: select
            $name: RAID Level
            $values:
              -
              - 0
              - 1
              - 4
              - 5
              - 6
              - 10
              - linear
              - multipath
            $default:
            $optional: true
        devices:
            $visibleIf: .type == RAID
            $type: edit-group
            $name: Symbolic IDs of devices to used for RAID device type
            $minItems: 0
            $prototype:
                $type: text
                $help: E.g. disk1p1, disk2p1, ... Combination of Disk symbolic ID and Partition symbolic ID to describe devices/partitions used to build RAID device.
                $placeholder: Enter combination of Disk and Partition symbolic ID (e.g. disk1part1, disk2part1, ...)
            $optional: True
        disklabel:
            $type: select
            $name: Partition table type
            $values:
              - gpt
              - msdos
              - none
        partitions:
            $type: edit-group
            $itemName: Partition ${i}
            $minItems: 1
            $optional: True
            $visibleIf: .disklabel != "none"
            $prototype:
                $type: group
                $key:
                    $type: text
                    $name: Partition Symbolic ID
                    $help: E.g. p1, p2, ... Together with Disk symbolic ID is used for RAID completion.
                    $placeholder: Enter partition symbolic ID (e.g. part1, part2, ...)
                size_MiB:
                    $type: number
                    $name: Partition Size (MiB)
                    $help: Leave blank to acquire remaining empty space on the disk.
                    $optional: True
                mountpoint:
                    $type: text
                    $name: Device Mount Point
                    $help: What should the partition be mount as - /, swap, /var, ...
                    $optional: True
                format:
                    $type: select
                    $name: Filesystem Format
                    $values:
                      -
                      - btrfs
                      - ext4
                      - xfs
                      - vfat
                      - swap
                    $optional: True
                image:
                    $type: text
                    $name: OS Image to Deploy
                    $help: Name of the OS Image. Leave blank if no image should be deployed on this partition.
                    $optional: True
                image_version:
                    $visibleIf: .image != ''
                    $type: text
                    $help: Version of OS Image. Leave blank for most recent.
                    $optional: True
                luks_pass:
                    $optional: True
                    $type: text
                    $name: Partition Encryption Password
                    $help: Password for encrypted partition. Leave blank for unencrypted. Image itself still can be encrypted.
                flags:
                    $type: select
                    $name: Partition Flags
                    $values:
                      -
                      - swap
                      - raid
                      - bios_grub
                      - esp
                      - boot
                    $default:
        mountpoint:
            $type: text
            $name: Device Mount Point
            $help: What should the partition be mount as - /, swap, /var, ...
            $optional: True
            $visibleIf: .disklabel == "none"
        format:
            $type: select
            $name: Filesystem Format
            $visibleIf: .disklabel == "none"
            $values:
              -
              - btrfs
              - ext4
              - xfs
              - vfat
              - swap
            $optional: True
        image:
            $visibleIf: .disklabel == "none"
            $type: text
            $name: OS Image to Deploy
            $help: Name of the OS Image. Leave blank if no image should be deployed on this partition.
            $optional: True
        image_version:
            $visibleIf: .image != ''
            $type: text
            $help: Version of OS Image. Leave blank for most recent.
            $optional: True
        luks_pass:
            $visibleIf: .disklabel == "none"
            $optional: True
            $type: text
            $name: Partition Encryption Password
            $help: Password for encrypted partition. Leave blank for unencrypted. Image itself still can be encrypted.
070701000000EA000081B40000000000000000000000015EA152C40000005B000000000000000000000000000000000000004200000000susemanager-sls/test/data/formulas/metadata/saltboot/metadata.yml description:
  Control deployment and boot of POS terminals
group: SUSE_manager_for_retail
 070701000000EB000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000003200000000susemanager-sls/test/data/formulas/metadata/tftpd 070701000000EC000081B40000000000000000000000015EA152C400000137000000000000000000000000000000000000003B00000000susemanager-sls/test/data/formulas/metadata/tftpd/form.yml    tftpd:
  $type: hidden-group

  listen_ip:
     $name: 'Internal Network Address'
     $type: text
     $optional: True

  root_dir:
     $name: 'TFTP base directory'
     $type: text
     $default: '/srv/tftpboot'

  tftpd_user:
     $name: 'run TFTP under user'
     $type: text
     $default: 'tftp'

      
 070701000000ED000081B40000000000000000000000015EA152C400000068000000000000000000000000000000000000003F00000000susemanager-sls/test/data/formulas/metadata/tftpd/metadata.yml    description:
  Settings for tftpd service
group: general_system_configuration
after:
  - branch-network
070701000000EE000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000004000000000susemanager-sls/test/data/formulas/metadata/virtualization-host   070701000000EF000081B40000000000000000000000015EA152C40000005F000000000000000000000000000000000000004900000000susemanager-sls/test/data/formulas/metadata/virtualization-host/form.yml  hypervisor:
  $type: select
  $values: ["KVM",
            "Xen"
            ]
  $default: KVM
 070701000000F0000081B40000000000000000000000015EA152C400000055000000000000000000000000000000000000004D00000000susemanager-sls/test/data/formulas/metadata/virtualization-host/metadata.yml  description:
  Settings for virtualization host.
group: general_system_configuration
   070701000000F1000041FD0000000000000000000000015EA152C400000000000000000000000000000000000000000000003300000000susemanager-sls/test/data/formulas/metadata/vsftpd    070701000000F2000081B40000000000000000000000015EA152C400000604000000000000000000000000000000000000003C00000000susemanager-sls/test/data/formulas/metadata/vsftpd/form.yml   vsftpd_config:
  $type: hidden-group

  anon_root:
     $name: 'FTP server directory'
     $type: text
     $default: '/srv/ftp'

     
  listen_address:
     $name: 'Internal Network Address'
     $type: text
     $optional: True

  ssl_enable:
     $name:  'Enable ssl'
     $type: boolean
     $default: false
     
  secure_chroot_dir: 
     $name:  'Chroot dir'
     $type: text
     $default: '/usr/share/empty'

  anonymous_enable:
     $name:  'Allow anonymous FTP'
     $type: boolean
     $default: true

  allow_anon_ssl:
     $name:  'Allow SSL for anonymous'
     $type: boolean
     $default: true

  listen:
     $name:  'Run standalone'
     $type: boolean
     $default: true

  local_enable:
     $name:  'Allow local users'
     $type: boolean
     $default: true

  dirmessage_enable:
     $name:  'Activate directory messages'
     $type: boolean
     $default: true

  use_localtime: 
     $name:  'Use localtime'
     $type: boolean
     $default: true

  xferlog_enable: 
     $name:  'Activate logging of transfers'
     $type: boolean
     $default: true

  connect_from_port_20: 
     $name:  'Connect from port 20'
     $type: boolean
     $default: true

  pam_service_name: 
     $name:  'PAM service name'
     $type: text
     $default: 'vsftpd'

  rsa_cert_file:
     $name:  'RSA certificate file'
     $type: text
     $default: '/etc/ssl/certs/[ssl-cert-file].pem'

  rsa_private_key_file:
     $name:  'RSA private key file'
     $type: text
     $default: '/etc/ssl/private/[ssl-cert-file].key'

      070701000000F3000081B40000000000000000000000015EA152C400000071000000000000000000000000000000000000004000000000susemanager-sls/test/data/formulas/metadata/vsftpd/metadata.yml   description:
  Settings for vsftpd for branchserver
group: general_system_configuration
after:
  - branch-network   070701000000F4000081B40000000000000000000000015EA152C400000018000000000000000000000000000000000000002E00000000susemanager-sls/test/data/group_formulas.json {"9":["locale","tftpd"]}070701000000F5000081B40000000000000000000000015EA152C400000077000000000000000000000000000000000000002F00000000susemanager-sls/test/data/minion_formulas.json    {"suma-refhead-min-centos7.mgr.suse.de":["branch-network"],"suma-refhead-min-sles12sp4.mgr.suse.de":["branch-network"]} 070701000000F6000081B40000000000000000000000015EA152C400001F64000000000000000000000000000000000000002400000000susemanager-sls/test/test_engine.py   import logging
import pytest
import psycopg2
import shlex
import subprocess
from mgr_events import Responder, DEFAULT_COMMIT_BURST
from mock import MagicMock, patch, call
from sqlalchemy import create_engine
from sqlalchemy_utils import database_exists, create_database, drop_database


ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
log = logging.getLogger('mgr_events')
log.setLevel(logging.DEBUG)
log.addHandler(ch)


@pytest.fixture(scope="session")
def postgres(request):
    proc = subprocess.Popen(shlex.split("su postgres -c \"pg_ctl -D ~/data -l ~/logfile start\""))
    def finalizer():
        subprocess.Popen(shlex.split("su postgres -c \"pg_ctl stop -D /var/lib/pgsql/data\""))
    request.addfinalizer(finalizer)
    outs, errs = proc.communicate(timeout=15)
    yield proc


@pytest.fixture(scope="session")
def db_engine(postgres):
    return create_engine("postgresql://postgres@/test")


@pytest.fixture
def db_connection(db_engine):
    if not database_exists(db_engine.url):
        create_database(db_engine.url)
    with psycopg2.connect(user='postgres', host="localhost", dbname="test") as connection:
        yield connection
    drop_database(db_engine.url)


def new_connection():
    return psycopg2.connect(user='postgres', host="localhost", dbname="test")


@pytest.fixture
def create_tables(db_connection):
    sql = """CREATE TABLE suseSaltEvent (
        id SERIAL PRIMARY KEY,
        minion_id CHARACTER VARYING(256),
        data TEXT NOT NULL,
        queue NUMERIC NOT NULL
    );"""
    db_connection.cursor().execute(sql)
    db_connection.commit()


def delete_table(conn, table):
    conn.cursor().execute("DELETE FROM %s" % table)
    conn.commit()


@pytest.fixture
def responder(db_connection, create_tables):
    with patch('mgr_events.psycopg2') as mock_psycopg2:
        mock_psycopg2.connect.return_value = db_connection
        return Responder(
            MagicMock(),  # mock event_bus
            {
                'postgres_db': {
                     'dbname': 'tests',
                     'user': 'postgres',
                     'password': '',
                     'host': 'localhost',
                     'notify_channel': 'suseSaltEvent'
                 },
                'events': {
                    'thread_pool_size': 3
                }
            }
        )


def test_connection_recovery_on_insert(db_connection, responder):
    disposable_connection = new_connection()
    responder.connection = disposable_connection
    responder._insert('salt/minion/1/start', {'value': 1})
    responder.connection.close()
    with patch('mgr_events.psycopg2') as mock_psycopg2:
        mock_psycopg2.connect.return_value = db_connection
        responder._insert('salt/minion/2/start', {'value': 2})
    responder.connection.commit()
    responder.cursor.execute("SELECT * FROM suseSaltEvent")
    resp = responder.cursor.fetchall()
    assert len(resp) == 2


def test_connection_recovery_on_commit(db_connection, responder):
    responder.connection = new_connection()
    responder._insert('salt/minion/1/start', {'value': 1})
    responder.connection.close()
    with patch('mgr_events.psycopg2') as mock_psycopg2:
        mock_psycopg2.connect.return_value = db_connection
        responder.attempt_commit()
    responder.connection.commit()
    responder.cursor.execute("SELECT * FROM suseSaltEvent")
    resp = responder.cursor.fetchall()
    assert len(resp) == 1


def test_insert_start_event(responder, db_connection):
    responder.event_bus.unpack.return_value = ('salt/minion/12345/start', {'value': 1})
    responder.add_event_to_queue('')
    responder.cursor.execute("SELECT * FROM suseSaltEvent;")
    resp = responder.cursor.fetchall()
    assert resp
    assert responder.tokens == DEFAULT_COMMIT_BURST - 1


def test_insert_job_return_event(responder):
    responder.event_bus.unpack.return_value = ('salt/job/12345/ret/6789', {'value': 1})
    responder.add_event_to_queue('')
    responder.cursor.execute("SELECT * FROM suseSaltEvent;")
    resp = responder.cursor.fetchall()
    assert resp
    assert responder.tokens == DEFAULT_COMMIT_BURST - 1

def test_insert_batch_start_event(responder):
    responder.event_bus.unpack.return_value = ('salt/batch/12345/start', {'value': 1})
    responder.add_event_to_queue('')
    responder.cursor.execute("SELECT * FROM suseSaltEvent;")
    resp = responder.cursor.fetchall()
    assert resp
    assert responder.tokens == DEFAULT_COMMIT_BURST - 1

def test_discard_batch_presence_ping_event(responder):
    responder.event_bus.unpack.return_value = ('salt/job/12345/ret/6789', {'value': 1, 'fun': 'test.ping', 'metadata': {'batch-mode': True}})
    responder.add_event_to_queue('')
    responder.cursor.execute("SELECT * FROM suseSaltEvent;")
    resp = responder.cursor.fetchall()
    assert len(resp) == 0


def test_keep_presence_ping_event_without_batch(responder):
    responder.event_bus.unpack.return_value = ('salt/job/12345/ret/6789', {'value': 1, 'fun': 'test.ping', 'id': 'testminion'})
    responder.add_event_to_queue('')
    responder.cursor.execute("SELECT * FROM suseSaltEvent;")
    resp = responder.cursor.fetchall()
    assert len(resp) == 1


def test_commit_scheduled_on_init(responder):
    assert responder.event_bus.io_loop.call_later.call_count == 1


def test_commit_empty_queue(responder):
    responder.counters = [0, 0, 0, 0]
    with patch.object(responder, 'event_bus', MagicMock()):
        with patch.object(responder, 'connection') as mock_connection:
            mock_connection.closed = False
            responder.attempt_commit()
            assert responder.connection.commit.call_count == 0
        assert responder.tokens == DEFAULT_COMMIT_BURST


def test_postgres_notification(responder):
    with patch.object(responder, 'cursor'):
        responder._insert('salt/minion/1/start', {'value': 1, 'id': 'testminion'})
        assert responder.counters == [0, 0, 0, 0]
        assert responder.tokens == DEFAULT_COMMIT_BURST -1
        assert responder.cursor.execute.mock_calls[-1:] == [call("NOTIFY suseSaltEvent, '0,0,1,0';")]

def test_add_token(responder):
    responder.tokens = 0
    responder.add_token()
    assert responder.tokens == 1

def test_add_token_max(responder):
    responder.add_token()
    assert responder.tokens == DEFAULT_COMMIT_BURST

def test_commit_avoidance_without_tokens(responder):
    with patch.object(responder, 'cursor'):
        with patch.object(responder, 'connection') as mock_connection:
            mock_connection.closed = False
            mock_connection.encoding = 'utf-8'
            responder.tokens = 0
            responder._insert('salt/minion/1/start', {'id': 'testminion', 'value': 1})
            assert responder.counters == [0, 0, 1, 0]
            assert responder.tokens == 0
            assert responder.connection.commit.call_count == 0
            assert responder.cursor.execute.mock_calls == [call('INSERT INTO suseSaltEvent (minion_id, data, queue) VALUES (%s, %s, %s);', ('testminion', '{"tag": "salt/minion/1/start", "data": {"id": "testminion", "value": 1}}', 2))]


def test_postgres_connect(db_connection, responder):
    disposable_connection = new_connection()
    disposable_connection.close()
    responder.connection = disposable_connection
    with patch('mgr_events.time') as mock_time:
        with patch('mgr_events.psycopg2') as mock_psycopg2:
            mock_psycopg2.connect.side_effect = [psycopg2.OperationalError, db_connection]
            mock_psycopg2.OperationalError = psycopg2.OperationalError
            responder.db_keepalive()
            assert mock_psycopg2.connect.call_count == 2
    mock_time.sleep.assert_called_once_with(5)


def test_postgres_connect_with_port(responder):
    responder.config['postgres_db']['port'] = '1234'
    with patch('mgr_events.psycopg2') as mock_psycopg2:
        responder._connect_to_database()
        mock_psycopg2.connect.assert_called_once_with(u"dbname='tests' user='postgres' host='localhost' port='1234' password=''")
070701000000F7000081B40000000000000000000000015EA152C40000039B000000000000000000000000000000000000003000000000susemanager-sls/test/test_pillar_suma_minion.py   # -*- coding: utf-8 -*-
'''
:codeauthor:    Michael Calmer <Michael.Calmer@suse.com>
'''

from mock import MagicMock, patch

import sys
sys.path.append("../modules/pillar")
import os
import copy

import suma_minion


def test_virtual():
    '''
    Test virtual returns the module name
    '''
    assert suma_minion.__virtual__() == True

def test_formula_pillars():
    '''
    Test formula ordering
    '''
    suma_minion.FORMULAS_DATA_PATH = os.path.sep.join([os.path.abspath(''), 'data'])
    suma_minion.FORMULA_ORDER_FILE = os.path.sep.join([os.path.abspath(''), 'data', 'formula_order.json'])
    suma_minion.MANAGER_FORMULAS_METADATA_MANAGER_PATH = os.path.sep.join([os.path.abspath(''), 'data', 'formulas', 'metadata'])
    pillar = suma_minion.formula_pillars("suma-refhead-min-sles12sp4.mgr.suse.de", [9])
    assert "formulas" in pillar
    assert pillar["formulas"] == ['branch-network', 'locale', 'tftpd']

 07070100000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000B00000000TRAILER!!!                                                                                                                                                                                                                                                                                                                                                                                                                                        