1 Main entry point ################################## Source file: st2/st2actions/bin/st2actionrunner
import sys
from st2actions.cmd import actionrunner
if __name__ == '__main__':
    sys.exit(actionrunner.main())
2 Call ######################################### Source file: st2/st2actions/st2actions/cmd/actionrunner.py
from st2common.util.monkey_patch import monkey_patch
monkey_patch()
import os
import signal
import sys
from st2actions import config
from st2actions import scheduler, worker
from st2common import log as logging
from st2common.service_setup import setup as common_setup
from st2common.service_setup import teardown as common_teardown
__all__ = [
    'main'
]
LOG = logging.getLogger(__name__)
def _setup_sigterm_handler():

    def sigterm_handler(signum=None, frame=None):
        # This will cause SystemExit to be thrown and allow for component cleanup.
        sys.exit(0)

    # Register a SIGTERM signal handler which calls sys.exit which causes SystemExit to
    # be thrown. We catch SystemExit and handle cleanup there.
    signal.signal(signal.SIGTERM, sigterm_handler)
def _setup():
    common_setup(service='actionrunner', config=config, setup_db=True, register_mq_exchanges=True,
                 register_signal_handlers=True)
    _setup_sigterm_handler()
def _run_worker():
    LOG.info('(PID=%s) Worker started.', os.getpid())

    components = [
        scheduler.get_scheduler(),
        worker.get_worker()
    ]

    try:
        for component in components:
            component.start()

        for component in components:
            component.wait()
    except (KeyboardInterrupt, SystemExit):
        LOG.info('(PID=%s) Worker stopped.', os.getpid())

        errors = False
        for component in components:
            try:
                component.shutdown()
            except:
                LOG.exception('Unable to shutdown %s.', component.__class__.__name__)
                errors = True

        if errors:
            return 1
    except:
        LOG.exception('(PID=%s) Worker unexpectedly stopped.', os.getpid())
        return 1

    return 0
def _teardown():
    common_teardown()


def main():
    try:
        _setup()
        return _run_worker()
    except SystemExit as exit_code:
        sys.exit(exit_code)
    except:
        LOG.exception('(PID=%s) Worker quit due to exception.', os.getpid())
        return 1
    finally:
        _teardown()
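The pattern in _run_worker() above is worth noting: start every component, block in wait() until the process is interrupted, then call shutdown() on each component once KeyboardInterrupt/SystemExit arrives. Below is a minimal, self-contained sketch of that lifecycle; DummyComponent is a hypothetical stand-in for illustration only (st2's real components are the kombu consumers returned by scheduler.get_scheduler() and worker.get_worker()).

# Illustrative sketch only -- not st2 source. DummyComponent mimics the
# start()/wait()/shutdown() contract used by _run_worker().
import threading
import time


class DummyComponent(object):
    def __init__(self, name):
        self.name = name
        self._stop = threading.Event()
        self._thread = None

    def start(self):
        # Begin "consuming" in the background (st2 uses eventlet green threads + kombu).
        self._thread = threading.Thread(target=self._run)
        self._thread.start()

    def _run(self):
        while not self._stop.is_set():
            time.sleep(0.1)  # pretend to process queue messages

    def wait(self):
        # Block the caller until this component finishes.
        self._thread.join()

    def shutdown(self):
        # Ask the run loop to exit so that wait() can return.
        self._stop.set()
        self._thread.join()


if __name__ == '__main__':
    components = [DummyComponent('scheduler'), DummyComponent('worker')]
    try:
        for component in components:
            component.start()
        for component in components:
            component.wait()  # blocks until Ctrl-C
    except (KeyboardInterrupt, SystemExit):
        for component in components:
            component.shutdown()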
3 Call ######################################### Source file: st2/st2common/st2common/service_setup.py
from __future__ import absolute_import
import os
import traceback
from oslo_config import cfg
from st2common import log as logging
from st2common.constants.logging import DEFAULT_LOGGING_CONF_PATH
from st2common.transport.bootstrap_utils import register_exchanges_with_retry
from st2common.signal_handlers import register_common_signal_handlers
from st2common.util.debugging import enable_debugging
from st2common.models.utils.profiling import enable_profiling
from st2common import triggers
from st2common.rbac.migrations import run_all as run_all_rbac_migrations
# Note: This is here for backward compatibility.
# These functions have been moved into a standalone module to avoid expensive
# indirect import costs.
from st2common.database_setup import db_setup
from st2common.database_setup import db_teardown
__all__ = [
    'setup',
    'teardown',
    'db_setup',
    'db_teardown'
]
LOG = logging.getLogger(__name__)
def setup(service, config, setup_db=True, register_mq_exchanges=True,
          register_signal_handlers=True, register_internal_trigger_types=False,
          run_migrations=True, config_args=None):
    """
    Common setup function.

    Currently it performs the following operations:

    1. Parses config and CLI arguments
    2. Establishes DB connection
    3. Sets log level for all the loggers to DEBUG if the --debug flag is present or
       if the system.debug config option is set to True.
    4. Registers RabbitMQ exchanges
    5. Registers common signal handlers
    6. Registers internal trigger types

    :param service: Name of the service.
    :param config: Config object to use to parse args.
    """
    # Set up logger which logs everything which happens during and before config
    # parsing to sys.stdout
    logging.setup(DEFAULT_LOGGING_CONF_PATH, excludes=None)

    # Parse args to setup config.
    if config_args:
        config.parse_args(config_args)
    else:
        config.parse_args()

    config_file_paths = cfg.CONF.config_file
    config_file_paths = [os.path.abspath(path) for path in config_file_paths]
    LOG.debug('Using config files: %s', ','.join(config_file_paths))

    # Setup logging.
    logging_config_path = config.get_logging_config_path()
    logging_config_path = os.path.abspath(logging_config_path)
    LOG.debug('Using logging config: %s', logging_config_path)

    try:
        logging.setup(logging_config_path, redirect_stderr=cfg.CONF.log.redirect_stderr,
                      excludes=cfg.CONF.log.excludes)
    except KeyError as e:
        tb_msg = traceback.format_exc()
        if 'log.setLevel' in tb_msg:
            msg = 'Invalid log level selected. Log level names need to be all uppercase.'
            msg += '\n\n' + getattr(e, 'message', str(e))
            raise KeyError(msg)
        else:
            raise e

    if cfg.CONF.debug or cfg.CONF.system.debug:
        enable_debugging()

    if cfg.CONF.profile:
        enable_profiling()

    # All other setup which requires config to be parsed and logging to
    # be correctly setup.
    if setup_db:
        db_setup()

    if register_mq_exchanges:
        register_exchanges_with_retry()

    if register_signal_handlers:
        register_common_signal_handlers()

    if register_internal_trigger_types:
        triggers.register_internal_trigger_types()

    # TODO: This is a "not so nice" workaround until we have a proper migration system in place
    if run_migrations:
        run_all_rbac_migrations()
def teardown():
    """
    Common teardown function.
    """
    db_teardown()
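setup() is the common entry point every st2 service funnels through (section 2 above calls it as common_setup). Below is a hedged sketch of calling it directly, e.g. from a test or a one-off script; the service name 'myservice' and the config-file path are placeholders, and config_args is simply forwarded to config.parse_args().

# Illustrative sketch only -- 'myservice' and the config path are placeholders.
from st2actions import config
from st2common.service_setup import setup, teardown


def run():
    try:
        setup(service='myservice', config=config,
              setup_db=True, register_mq_exchanges=False,
              register_signal_handlers=False,
              config_args=['--config-file', '/etc/st2/st2.conf'])
        # ... work against the st2 DB here ...
        return 0
    finally:
        teardown()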
4 Call ###################################### Source file: st2/st2common/st2common/database_setup.py
from oslo_config import cfg
from st2common.models import db
from st2common.persistence import db_init
__all__ = [
    'db_config',
    'db_setup',
    'db_teardown'
]
def db_config():
    username = getattr(cfg.CONF.database, 'username', None)
    password = getattr(cfg.CONF.database, 'password', None)

    return {'db_name': cfg.CONF.database.db_name,
            'db_host': cfg.CONF.database.host,
            'db_port': cfg.CONF.database.port,
            'username': username,
            'password': password,
            'ssl': cfg.CONF.database.ssl,
            'ssl_keyfile': cfg.CONF.database.ssl_keyfile,
            'ssl_certfile': cfg.CONF.database.ssl_certfile,
            'ssl_cert_reqs': cfg.CONF.database.ssl_cert_reqs,
            'ssl_ca_certs': cfg.CONF.database.ssl_ca_certs,
            'ssl_match_hostname': cfg.CONF.database.ssl_match_hostname}
def db_setup(ensure_indexes=True):
    """
    Creates the database and indexes (optional).
    """
    db_cfg = db_config()
    db_cfg['ensure_indexes'] = ensure_indexes
    connection = db_init.db_setup_with_retry(**db_cfg)
    return connection


def db_teardown():
    """
    Disconnects from the database.
    """
    return db.db_teardown()
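db_config() simply flattens the [database] section of the st2 config into keyword arguments for db_init.db_setup_with_retry(). For a stock single-node MongoDB the resulting dict looks roughly like the sketch below; the host, port and database name are illustrative placeholders, not values taken from this file.

# Roughly what db_config() returns for a local MongoDB without SSL or auth.
# Host, port and db_name here are illustrative placeholders.
db_cfg = {
    'db_name': 'st2',
    'db_host': '127.0.0.1',
    'db_port': 27017,
    'username': None,
    'password': None,
    'ssl': False,
    'ssl_keyfile': None,
    'ssl_certfile': None,
    'ssl_cert_reqs': None,
    'ssl_ca_certs': None,
    'ssl_match_hostname': True,
}
# db_setup() then adds 'ensure_indexes' and expands this into
# db_init.db_setup_with_retry(**db_cfg).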
5 Call ######################################### Source file: st2/st2actions/st2actions/scheduler.py
from kombu import Connection
from st2common import log as logging
from st2common.constants import action as action_constants
from st2common.exceptions.db import StackStormDBObjectNotFoundError
from st2common.models.db.liveaction import LiveActionDB
from st2common.services import action as action_service
from st2common.persistence.liveaction import LiveAction
from st2common.persistence.policy import Policy
from st2common import policies
from st2common.transport import consumers
from st2common.transport import utils as transport_utils
from st2common.util import action_db as action_utils
from st2common.transport.queues import ACTIONSCHEDULER_REQUEST_QUEUE
__all__ = [
    'ActionExecutionScheduler',
    'get_scheduler'
]
LOG = logging.getLogger(__name__)
class ActionExecutionScheduler(consumers.MessageHandler):
    message_type = LiveActionDB

    def process(self, request):
        """Schedules the LiveAction and publishes the request
        to the appropriate action runner(s).

        LiveActions in statuses other than "requested" are ignored.

        :param request: Action execution request.
        :type request: ``st2common.models.db.liveaction.LiveActionDB``
        """
        if request.status != action_constants.LIVEACTION_STATUS_REQUESTED:
            LOG.info('%s is ignoring %s (id=%s) with "%s" status.',
                     self.__class__.__name__, type(request), request.id, request.status)
            return

        try:
            liveaction_db = action_utils.get_liveaction_by_id(request.id)
        except StackStormDBObjectNotFoundError:
            LOG.exception('Failed to find liveaction %s in the database.', request.id)
            raise

        # Apply policies defined for the action.
        liveaction_db = self._apply_pre_run_policies(liveaction_db=liveaction_db)

        # Exit if the status of the request is no longer runnable.
        # The status could have been changed by one of the policies.
        if liveaction_db.status not in [action_constants.LIVEACTION_STATUS_REQUESTED,
                                        action_constants.LIVEACTION_STATUS_SCHEDULED]:
            LOG.info('%s is ignoring %s (id=%s) with "%s" status after policies are applied.',
                     self.__class__.__name__, type(request), request.id, liveaction_db.status)
            return

        # Update liveaction status to "scheduled".
        if liveaction_db.status == action_constants.LIVEACTION_STATUS_REQUESTED:
            liveaction_db = action_service.update_status(
                liveaction_db, action_constants.LIVEACTION_STATUS_SCHEDULED, publish=False)

        # Publish the "scheduled" status here manually. Otherwise, there could be a
        # race condition with the update of the action_execution_db if the execution
        # of the liveaction completes first.
        LiveAction.publish_status(liveaction_db)
    def _apply_pre_run_policies(self, liveaction_db):
        # Apply policies defined for the action.
        policy_dbs = Policy.query(resource_ref=liveaction_db.action, enabled=True)
        LOG.debug('Applying %s pre_run policies' % (len(policy_dbs)))

        for policy_db in policy_dbs:
            driver = policies.get_driver(policy_db.ref,
                                         policy_db.policy_type,
                                         **policy_db.parameters)
            try:
                LOG.debug('Applying pre_run policy "%s" (%s) for liveaction %s' %
                          (policy_db.ref, policy_db.policy_type, str(liveaction_db.id)))
                liveaction_db = driver.apply_before(liveaction_db)
            except:
                LOG.exception('An exception occurred while applying policy "%s".', policy_db.ref)

            if liveaction_db.status == action_constants.LIVEACTION_STATUS_DELAYED:
                break

        return liveaction_db
def get_scheduler():
    with Connection(transport_utils.get_messaging_urls()) as conn:
        return ActionExecutionScheduler(conn, [ACTIONSCHEDULER_REQUEST_QUEUE])
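The interesting part of _apply_pre_run_policies() is the control flow: each policy driver's apply_before() may rewrite the liveaction (for example, a concurrency policy can flip its status to "delayed"), and the loop stops at the first policy that does so, after which process() above ignores the request instead of publishing "scheduled". The toy sketch below isolates that control flow; FakeLiveAction, NoopPolicy and DelayPolicy are hypothetical stand-ins, not st2 classes.

# Toy sketch of the pre-run policy loop -- the classes here are hypothetical
# stand-ins, not st2 code.
STATUS_REQUESTED = 'requested'
STATUS_DELAYED = 'delayed'


class FakeLiveAction(object):
    def __init__(self):
        self.status = STATUS_REQUESTED


class NoopPolicy(object):
    def apply_before(self, liveaction):
        return liveaction  # leaves the liveaction untouched


class DelayPolicy(object):
    def apply_before(self, liveaction):
        liveaction.status = STATUS_DELAYED  # e.g. a concurrency limit was hit
        return liveaction


def apply_pre_run_policies(liveaction, drivers):
    for driver in drivers:
        liveaction = driver.apply_before(liveaction)
        if liveaction.status == STATUS_DELAYED:
            break  # remaining policies are skipped once the action is delayed
    return liveaction


if __name__ == '__main__':
    la = apply_pre_run_policies(FakeLiveAction(), [NoopPolicy(), DelayPolicy(), NoopPolicy()])
    print(la.status)  # -> 'delayed'; the scheduler would then skip publishing 'scheduled'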
6 Call ####################################### Source file: st2/st2actions/st2actions/worker.py
import sys
import traceback
from kombu import Connection
from st2actions.container.base import RunnerContainer
from st2common import log as logging
from st2common.constants import action as action_constants
from st2common.exceptions.actionrunner import ActionRunnerException
from st2common.exceptions.db import StackStormDBObjectNotFoundError
from st2common.models.db.liveaction import LiveActionDB
from st2common.persistence.execution import ActionExecution
from st2common.services import executions
from st2common.transport.consumers import MessageHandler
from st2common.transport.consumers import ActionsQueueConsumer
from st2common.transport import utils as transport_utils
from st2common.util import action_db as action_utils
from st2common.util import system_info
from st2common.transport import queues
__all__ = [
    'ActionExecutionDispatcher',
    'get_worker'
]
LOG = logging.getLogger(__name__)
ACTIONRUNNER_QUEUES =