root/lib/pacemaker/pcmk_sched_allocate.c


DEFINITIONS

This source file includes the following definitions:
  1. check_params
  2. failcount_clear_action_exists
  3. check_failure_threshold
  4. apply_exclusive_discovery
  5. apply_stickiness
  6. apply_shutdown_locks
  7. count_available_nodes
  8. apply_node_criteria
  9. allocate_resources
  10. clear_failcounts_if_orphaned
  11. schedule_resource_actions
  12. is_managed
  13. any_managed_resources
  14. needs_fencing
  15. needs_shutdown
  16. add_nondc_fencing
  17. schedule_fencing
  18. schedule_fencing_and_shutdowns
  19. log_resource_details
  20. log_all_actions
  21. log_unrunnable_actions
  22. unpack_cib
  23. pcmk__schedule_actions

/*
 * Copyright 2004-2022 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>

#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>

#include <glib.h>

#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"

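// Set up this library's trace-logging data (used by crm_trace() and friends)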
CRM_TRACE_INIT_DATA(pacemaker);

/*!
 * \internal
 * \brief Do deferred action checks after allocation
 *
 * When unpacking the resource history, the scheduler checks for resource
 * configurations that have changed since an action was run. However, at that
 * time, bundles using the REMOTE_CONTAINER_HACK don't have their final
 * parameter information, so instead they add a deferred check to a list. This
 * function processes one entry in that list.
 *
 * \param[in] rsc       Resource that action history is for
 * \param[in] node      Node that action history is for
 * \param[in] rsc_op    Action history entry
 * \param[in] check     Type of deferred check to do
 * \param[in] data_set  Cluster working set
 */
static void
check_params(pe_resource_t *rsc, pe_node_t *node, xmlNode *rsc_op,
             enum pe_check_parameters check, pe_working_set_t *data_set)
{
    const char *reason = NULL;
    op_digest_cache_t *digest_data = NULL;

    switch (check) {
        case pe_check_active:
            if (pcmk__check_action_config(rsc, node, rsc_op)
                && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
                                    data_set)) {
                reason = "action definition changed";
            }
            break;

        case pe_check_last_failure:
            digest_data = rsc_action_digest_cmp(rsc, rsc_op, node, data_set);
            switch (digest_data->rc) {
                case RSC_DIGEST_UNKNOWN:
                    crm_trace("Resource %s history entry %s on %s has "
                              "no digest to compare",
                              rsc->id, ID(rsc_op), node->details->id);
                    break;
                case RSC_DIGEST_MATCH:
                    break;
                default:
                    reason = "resource parameters have changed";
                    break;
            }
            break;
    }
    if (reason != NULL) {
        pe__clear_failcount(rsc, node, reason, data_set);
    }
}

/*!
 * \internal
 * \brief Check whether a resource has failcount clearing scheduled on a node
 *
 * \param[in] node  Node to check
 * \param[in] rsc   Resource to check
 *
 * \return true if \p rsc has failcount clearing scheduled on \p node,
 *         otherwise false
 */
static bool
failcount_clear_action_exists(pe_node_t *node, pe_resource_t *rsc)
{
    GList *list = pe__resource_actions(rsc, node, CRM_OP_CLEAR_FAILCOUNT, TRUE);

    if (list != NULL) {
        g_list_free(list);
        return true;
    }
    return false;
}

/*!
 * \internal
 * \brief Ban a resource from a node if it reached its failure threshold there
 *
 * \param[in] rsc       Resource to check failure threshold for
 * \param[in] node      Node to check \p rsc on
 */
static void
check_failure_threshold(pe_resource_t *rsc, pe_node_t *node)
{
    // If this is a collective resource, apply recursively to children instead
    if (rsc->children != NULL) {
        g_list_foreach(rsc->children, (GFunc) check_failure_threshold,
                       node);
        return;

    } else if (failcount_clear_action_exists(node, rsc)) {
        /* Don't force the resource away from this node due to a failcount
         * that's going to be cleared.
         *
         * @TODO Failcount clearing can be scheduled in
         * pcmk__handle_rsc_config_changes() via process_rsc_history(), or in
         * schedule_resource_actions() via check_params(). This runs well before
         * then, so it cannot detect those, meaning we might check the migration
         * threshold when we shouldn't. Worst case, we stop or move the
         * resource, then move it back in the next transition.
         */
        return;

    } else {
        pe_resource_t *failed = NULL;

        if (pcmk__threshold_reached(rsc, node, &failed)) {
            resource_location(failed, node, -INFINITY, "__fail_limit__",
                              rsc->cluster);
        }
    }
}

/*!
 * \internal
 * \brief If resource has exclusive discovery, ban node if not allowed
 *
 * Location constraints have a resource-discovery option that allows users to
 * specify where probes are done for the affected resource. If this is set to
 * exclusive, probes will only be done on nodes listed in exclusive constraints.
 * This function bans the resource from the node if the node is not listed.
 *
 * \param[in] rsc   Resource to check
 * \param[in] node  Node to check \p rsc on
 */
static void
apply_exclusive_discovery(pe_resource_t *rsc, pe_node_t *node)
{
    if (rsc->exclusive_discover || uber_parent(rsc)->exclusive_discover) {
        pe_node_t *match = NULL;

        // If this is a collective resource, apply recursively to children
        g_list_foreach(rsc->children, (GFunc) apply_exclusive_discovery, node);

        match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
        if ((match != NULL)
            && (match->rsc_discover_mode != pe_discover_exclusive)) {
            match->weight = -INFINITY;
        }
    }
}

/*!
 * \internal
 * \brief Apply stickiness to a resource if appropriate
 *
 * \param[in] rsc       Resource to check for stickiness
 * \param[in] data_set  Cluster working set
 */
static void
apply_stickiness(pe_resource_t *rsc, pe_working_set_t *data_set)
{
    pe_node_t *node = NULL;

    // If this is a collective resource, apply recursively to children instead
    if (rsc->children != NULL) {
        g_list_foreach(rsc->children, (GFunc) apply_stickiness, data_set);
        return;
    }

    /* A resource is sticky if it is managed, has stickiness configured, and is
     * active on a single node.
     */
    if (!pcmk_is_set(rsc->flags, pe_rsc_managed)
        || (rsc->stickiness < 1) || !pcmk__list_of_1(rsc->running_on)) {
        return;
    }

    node = rsc->running_on->data;

    /* In a symmetric cluster, stickiness can always be used. In an
     * asymmetric cluster, we have to check whether the resource is still
     * allowed on the node, so we don't keep the resource somewhere it is no
     * longer explicitly enabled.
     */
    if (!pcmk_is_set(rsc->cluster->flags, pe_flag_symmetric_cluster)
        && (pe_hash_table_lookup(rsc->allowed_nodes,
                                 node->details->id) == NULL)) {
        pe_rsc_debug(rsc,
                     "Ignoring %s stickiness because the cluster is "
                     "asymmetric and %s is not explicitly allowed",
                     rsc->id, pe__node_name(node));
        return;
    }

    pe_rsc_debug(rsc, "Resource %s has %d stickiness on %s",
                 rsc->id, rsc->stickiness, pe__node_name(node));
    resource_location(rsc, node, rsc->stickiness, "stickiness",
                      rsc->cluster);
}

/*!
 * \internal
 * \brief Apply shutdown locks for all resources as appropriate
 *
 * \param[in] data_set  Cluster working set
 */
static void
apply_shutdown_locks(pe_working_set_t *data_set)
{
    if (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
        return;
    }
    for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
        pe_resource_t *rsc = (pe_resource_t *) iter->data;

        rsc->cmds->shutdown_lock(rsc);
    }
}

/*!
 * \internal
 * \brief Calculate the number of available nodes in the cluster
 *
 * \param[in] data_set  Cluster working set
 */
static void
count_available_nodes(pe_working_set_t *data_set)
{
    if (pcmk_is_set(data_set->flags, pe_flag_no_compat)) {
        return;
    }

    // @COMPAT for API backward compatibility only (cluster does not use value)
    for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
        pe_node_t *node = (pe_node_t *) iter->data;

        if ((node != NULL) && (node->weight >= 0) && node->details->online
            && (node->details->type != node_ping)) {
            data_set->max_valid_nodes++;
        }
    }
    crm_trace("Online node count: %d", data_set->max_valid_nodes);
}

/*!
 * \internal
 * \brief Apply node-specific scheduling criteria
 *
 * After the CIB has been unpacked, process node-specific scheduling criteria
 * including shutdown locks, location constraints, resource stickiness,
 * migration thresholds, and exclusive resource discovery.
 *
 * \param[in] data_set  Cluster working set
 */
static void
apply_node_criteria(pe_working_set_t *data_set)
{
    crm_trace("Applying node-specific scheduling criteria");
    apply_shutdown_locks(data_set);
    count_available_nodes(data_set);
    pcmk__apply_locations(data_set);
    g_list_foreach(data_set->resources, (GFunc) apply_stickiness, data_set);

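    /* Check failure thresholds and exclusive discovery for every
     * resource/node combination
     */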
    for (GList *node_iter = data_set->nodes; node_iter != NULL;
         node_iter = node_iter->next) {
        for (GList *rsc_iter = data_set->resources; rsc_iter != NULL;
             rsc_iter = rsc_iter->next) {
            pe_node_t *node = (pe_node_t *) node_iter->data;
            pe_resource_t *rsc = (pe_resource_t *) rsc_iter->data;

            check_failure_threshold(rsc, node);
            apply_exclusive_discovery(rsc, node);
        }
    }
}

/*!
 * \internal
 * \brief Allocate resources to nodes
 *
 * \param[in] data_set  Cluster working set
 */
static void
allocate_resources(pe_working_set_t *data_set)
{
    GList *iter = NULL;

    crm_trace("Allocating resources to nodes");

    if (!pcmk__str_eq(data_set->placement_strategy, "default", pcmk__str_casei)) {
        pcmk__sort_resources(data_set);
    }
    pcmk__show_node_capacities("Original", data_set);

    if (pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
        /* Allocate remote connection resources first (which will also allocate
         * any colocation dependencies). If the connection is migrating, always
         * prefer the partial migration target.
         */
        for (iter = data_set->resources; iter != NULL; iter = iter->next) {
            pe_resource_t *rsc = (pe_resource_t *) iter->data;

            if (rsc->is_remote_node) {
                pe_rsc_trace(rsc, "Allocating remote connection resource '%s'",
                             rsc->id);
                rsc->cmds->assign(rsc, rsc->partial_migration_target);
            }
        }
    }

    /* Now do the rest of the resources */
    for (iter = data_set->resources; iter != NULL; iter = iter->next) {
        pe_resource_t *rsc = (pe_resource_t *) iter->data;

        if (!rsc->is_remote_node) {
            pe_rsc_trace(rsc, "Allocating %s resource '%s'",
                         crm_element_name(rsc->xml), rsc->id);
            rsc->cmds->assign(rsc, NULL);
        }
    }

    pcmk__show_node_capacities("Remaining", data_set);
}

/*!
 * \internal
 * \brief Schedule fail count clearing on online nodes if resource is orphaned
 *
 * \param[in] rsc       Resource to check
 * \param[in] data_set  Cluster working set
 */
static void
clear_failcounts_if_orphaned(pe_resource_t *rsc, pe_working_set_t *data_set)
{
    if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
        return;
    }
    crm_trace("Clear fail counts for orphaned resource %s", rsc->id);

    /* There's no need to recurse into rsc->children because those
     * should just be unallocated clone instances.
     */

    for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
        pe_node_t *node = (pe_node_t *) iter->data;
        pe_action_t *clear_op = NULL;

        if (!node->details->online) {
            continue;
        }
        if (pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
                             data_set) == 0) {
            continue;
        }

        clear_op = pe__clear_failcount(rsc, node, "it is orphaned", data_set);

        /* We can't use order_action_then_stop() here because its
         * pe_order_preserve breaks things
         */
        pcmk__new_ordering(clear_op->rsc, NULL, clear_op, rsc, stop_key(rsc),
                           NULL, pe_order_optional, data_set);
    }
}

/*!
 * \internal
 * \brief Schedule any resource actions needed
 *
 * \param[in] data_set  Cluster working set
 */
static void
schedule_resource_actions(pe_working_set_t *data_set)
{
    // Process deferred action checks
    pe__foreach_param_check(data_set, check_params);
    pe__free_param_checks(data_set);

    if (pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
        crm_trace("Scheduling probes");
        pcmk__schedule_probes(data_set);
    }

    if (pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)) {
        g_list_foreach(data_set->resources,
                       (GFunc) clear_failcounts_if_orphaned, data_set);
    }

    crm_trace("Scheduling resource actions");
    for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
        pe_resource_t *rsc = (pe_resource_t *) iter->data;

        rsc->cmds->create_actions(rsc);
    }
}

/*!
 * \internal
 * \brief Check whether a resource or any of its descendants are managed
 *
 * \param[in] rsc  Resource to check
 *
 * \return true if resource or any descendant is managed, otherwise false
 */
static bool
is_managed(const pe_resource_t *rsc)
{
    if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
        return true;
    }
    for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
        if (is_managed((pe_resource_t *) iter->data)) {
            return true;
        }
    }
    return false;
}

/*!
 * \internal
 * \brief Check whether any resources in the cluster are managed
 *
 * \param[in] data_set  Cluster working set
 *
 * \return true if any resource is managed, otherwise false
 */
static bool
any_managed_resources(pe_working_set_t *data_set)
{
    for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
        if (is_managed((pe_resource_t *) iter->data)) {
            return true;
        }
    }
    return false;
}

/*!
 * \internal
 * \brief Check whether a node requires fencing
 *
 * \param[in] node          Node to check
 * \param[in] have_managed  Whether any resource in cluster is managed
 * \param[in] data_set      Cluster working set
 *
 * \return true if \p node should be fenced, otherwise false
 */
static bool
needs_fencing(pe_node_t *node, bool have_managed, pe_working_set_t *data_set)
{
    return have_managed && node->details->unclean
           && pe_can_fence(data_set, node);
}

/*!
 * \internal
 * \brief Check whether a node requires shutdown
 *
 * \param[in] node  Node to check
 *
 * \return true if \p node should be shut down, otherwise false
 */
static bool
needs_shutdown(pe_node_t *node)
{
    if (pe__is_guest_or_remote_node(node)) {
        /* Do not send shutdown actions for Pacemaker Remote nodes.
         * @TODO We might come up with a good use for this in the future.
         */
        return false;
    }
    return node->details->online && node->details->shutdown;
}

/*!
 * \internal
 * \brief Track and order non-DC fencing
 *
 * \param[in] list      List of existing non-DC fencing actions
 * \param[in] action    Fencing action to prepend to \p list
 * \param[in] data_set  Cluster working set
 *
 * \return (Possibly new) head of \p list
 */
static GList *
add_nondc_fencing(GList *list, pe_action_t *action, pe_working_set_t *data_set)
{
    if (!pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)
        && (list != NULL)) {
        /* Concurrent fencing is disabled, so order each non-DC
         * fencing in a chain. If there is any DC fencing or
         * shutdown, it will be ordered after the last action in the
         * chain later.
         */
        order_actions((pe_action_t *) list->data, action, pe_order_optional);
    }
    return g_list_prepend(list, action);
}

/*!
 * \internal
 * \brief Schedule a node for fencing
 *
 * \param[in] node      Node that requires fencing
 * \param[in] data_set  Cluster working set
 *
 * \return Scheduled fencing action
 */
static pe_action_t *
schedule_fencing(pe_node_t *node, pe_working_set_t *data_set)
{
    pe_action_t *fencing = pe_fence_op(node, NULL, FALSE, "node is unclean",
                                       FALSE, data_set);

    pe_warn("Scheduling node %s for fencing", pe__node_name(node));
    pcmk__order_vs_fence(fencing, data_set);
    return fencing;
}

/*!
 * \internal
 * \brief Create and order node fencing and shutdown actions
 *
 * \param[in] data_set  Cluster working set
 */
static void
schedule_fencing_and_shutdowns(pe_working_set_t *data_set)
{
    pe_action_t *dc_down = NULL;
    bool integrity_lost = false;
    bool have_managed = any_managed_resources(data_set);
    GList *fencing_ops = NULL;
    GList *shutdown_ops = NULL;

    crm_trace("Scheduling fencing and shutdowns as needed");
    if (!have_managed) {
        crm_notice("No fencing will be done until there are resources to manage");
    }

    // Check each node for whether it needs fencing or shutdown
    for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
        pe_node_t *node = (pe_node_t *) iter->data;
        pe_action_t *fencing = NULL;

        /* Guest nodes are "fenced" by recovering their container resource,
         * so handle them separately.
         */
        if (pe__is_guest_node(node)) {
            if (node->details->remote_requires_reset && have_managed
                && pe_can_fence(data_set, node)) {
                pcmk__fence_guest(node);
            }
            continue;
        }

        if (needs_fencing(node, have_managed, data_set)) {
            fencing = schedule_fencing(node, data_set);

            // Track DC and non-DC fence actions separately
            if (node->details->is_dc) {
                dc_down = fencing;
            } else {
                fencing_ops = add_nondc_fencing(fencing_ops, fencing, data_set);
            }

        } else if (needs_shutdown(node)) {
            pe_action_t *down_op = pcmk__new_shutdown_action(node);

            // Track DC and non-DC shutdown actions separately
            if (node->details->is_dc) {
                dc_down = down_op;
            } else {
                shutdown_ops = g_list_prepend(shutdown_ops, down_op);
            }
        }

        if ((fencing == NULL) && node->details->unclean) {
            integrity_lost = true;
            pe_warn("Node %s is unclean but cannot be fenced",
                    pe__node_name(node));
        }
    }

    if (integrity_lost) {
        if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
            pe_warn("Resource functionality and data integrity cannot be "
                    "guaranteed (configure, enable, and test fencing to "
                    "correct this)");

        } else if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
            crm_notice("Unclean nodes will not be fenced until quorum is "
                       "attained or no-quorum-policy is set to ignore");
        }
    }

    if (dc_down != NULL) {
        /* Order any non-DC shutdowns before any DC shutdown, to avoid repeated
         * DC elections. However, we don't want to order non-DC shutdowns before
         * a DC *fencing*, because even though we don't want a node that's
         * shutting down to become DC, the DC fencing could be ordered before a
         * clone stop that's also ordered before the shutdowns, thus leading to
         * a graph loop.
         */
        if (pcmk__str_eq(dc_down->task, CRM_OP_SHUTDOWN, pcmk__str_none)) {
            pcmk__order_after_each(dc_down, shutdown_ops);
        }

        // Order any non-DC fencing before any DC fencing or shutdown

        if (pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)) {
            /* With concurrent fencing, order each non-DC fencing action
             * separately before any DC fencing or shutdown.
             */
            pcmk__order_after_each(dc_down, fencing_ops);
        } else if (fencing_ops != NULL) {
            /* Without concurrent fencing, the non-DC fencing actions are
             * already ordered relative to each other, so we just need to order
             * the DC fencing after the last action in the chain (which is the
             * first item in the list).
             */
            order_actions((pe_action_t *) fencing_ops->data, dc_down,
                          pe_order_optional);
        }
    }
    g_list_free(fencing_ops);
    g_list_free(shutdown_ops);
}

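/*!
 * \internal
 * \brief Log details of all resources other than inactive orphans
 *
 * \param[in] data_set  Cluster working set
 */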
static void
log_resource_details(pe_working_set_t *data_set)
{
    pcmk__output_t *out = data_set->priv;
    GList *all = NULL;

    /* We need a list of nodes that we are allowed to output information for.
     * This is necessary because out->message for all the resource-related
     * messages expects such a list, due to the `crm_mon --node=` feature. Here,
     * we just make it a list of all the nodes.
     */
    all = g_list_prepend(all, (gpointer) "*");

    for (GList *item = data_set->resources; item != NULL; item = item->next) {
        pe_resource_t *rsc = (pe_resource_t *) item->data;

        // Log all resources except inactive orphans
        if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)
            || (rsc->role != RSC_ROLE_STOPPED)) {
            out->message(out, crm_map_element_name(rsc->xml), 0, rsc, all, all);
        }
    }

    g_list_free(all);
}

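/*!
 * \internal
 * \brief Log all scheduled actions at the notice log level
 *
 * \param[in] data_set  Cluster working set
 */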
static void
log_all_actions(pe_working_set_t *data_set)
{
    /* This only ever outputs to the log, so ignore whatever output object was
     * previously set and just log instead.
     */
    pcmk__output_t *prev_out = data_set->priv;
    pcmk__output_t *out = NULL;

    if (pcmk__log_output_new(&out) != pcmk_rc_ok) {
        return;
    }

    pe__register_messages(out);
    pcmk__register_lib_messages(out);
    pcmk__output_set_log_level(out, LOG_NOTICE);
    data_set->priv = out;

    out->begin_list(out, NULL, NULL, "Actions");
    pcmk__output_actions(data_set);
    out->end_list(out);
    out->finish(out, CRM_EX_OK, true, NULL);
    pcmk__output_free(out);

    data_set->priv = prev_out;
}

/*!
 * \internal
 * \brief Log all required but unrunnable actions at trace level
 *
 * \param[in] data_set  Cluster working set
 */
static void
log_unrunnable_actions(pe_working_set_t *data_set)
{
    const uint64_t flags = pe_action_optional|pe_action_runnable|pe_action_pseudo;

    crm_trace("Required but unrunnable actions:");
    for (GList *iter = data_set->actions; iter != NULL; iter = iter->next) {
        pe_action_t *action = (pe_action_t *) iter->data;

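        // Log only actions that are required, real, and unrunnable
        // (that is, none of the optional/runnable/pseudo flags is set)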
        if (!pcmk_any_flags_set(action->flags, flags)) {
            pcmk__log_action("\t", action, true);
        }
    }
}

/*!
 * \internal
 * \brief Unpack the CIB for scheduling
 *
 * \param[in] cib       CIB XML to unpack (may be NULL if previously unpacked)
 * \param[in] flags     Working set flags to set in addition to defaults
 * \param[in] data_set  Cluster working set
 */
static void
unpack_cib(xmlNode *cib, unsigned long long flags, pe_working_set_t *data_set)
{
    const char *localhost_save = NULL;

    if (pcmk_is_set(data_set->flags, pe_flag_have_status)) {
        crm_trace("Reusing previously calculated cluster status");
        pe__set_working_set_flags(data_set, flags);
        return;
    }

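    /* Save the local node name, since set_working_set_defaults() below will
     * zero the entire working set
     */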
    if (data_set->localhost) {
        localhost_save = data_set->localhost;
    }

    CRM_ASSERT(cib != NULL);
    crm_trace("Calculating cluster status");

    /* This will zero the entire struct without freeing anything first, so
     * callers should never call pcmk__schedule_actions() with a populated data
     * set unless pe_flag_have_status is set (i.e. cluster_status() was
     * previously called, whether directly or via pcmk__schedule_actions()).
     */
    set_working_set_defaults(data_set);

    if (localhost_save) {
        data_set->localhost = localhost_save;
    }

    pe__set_working_set_flags(data_set, flags);
    data_set->input = cib;
    cluster_status(data_set); // Sets pe_flag_have_status
}

/*!
 * \internal
 * \brief Run the scheduler for a given CIB
 *
 * \param[in]     cib       CIB XML to use as scheduler input
 * \param[in]     flags     Working set flags to set in addition to defaults
 * \param[in,out] data_set  Cluster working set
 */
void
pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
                       pe_working_set_t *data_set)
{
    unpack_cib(cib, flags, data_set);
    pcmk__set_allocation_methods(data_set);
    pcmk__apply_node_health(data_set);
    pcmk__unpack_constraints(data_set);
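    // Nothing more to do if we were only verifying the configuration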
    if (pcmk_is_set(data_set->flags, pe_flag_check_config)) {
        return;
    }

    if (!pcmk_is_set(data_set->flags, pe_flag_quick_location) &&
         pcmk__is_daemon) {
        log_resource_details(data_set);
    }

    apply_node_criteria(data_set);

    if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
        return;
    }

    pcmk__create_internal_constraints(data_set);
    pcmk__handle_rsc_config_changes(data_set);
    allocate_resources(data_set);
    schedule_resource_actions(data_set);

    /* Remote ordering constraints need to happen prior to calculating fencing
     * because it is one more place we can mark nodes as needing fencing.
     */
    pcmk__order_remote_connection_actions(data_set);

    schedule_fencing_and_shutdowns(data_set);
    pcmk__apply_orderings(data_set);
    log_all_actions(data_set);
    pcmk__create_graph(data_set);

    if (get_crm_log_level() == LOG_TRACE) {
        log_unrunnable_actions(data_set);
    }
}
