root/tools/crm_resource_runtime.c


DEFINITIONS

This source file includes the following definitions.
  1. build_node_info_list
  2. cli_resource_search
  3. find_resource_attr
  4. find_matching_attr_resources_recursive
  5. find_matching_attr_resources
  6. cli_resource_update_attribute
  7. cli_resource_delete_attribute
  8. send_lrm_rsc_op
  9. rsc_fail_name
  10. clear_rsc_history
  11. clear_rsc_failures
  12. clear_rsc_fail_attrs
  13. cli_resource_delete
  14. cli_cleanup_all
  15. check_role
  16. check_managed
  17. check_locked
  18. node_is_unhealthy
  19. check_node_health
  20. cli_resource_check
  21. cli_resource_fail
  22. generate_resource_params
  23. resource_is_running_on
  24. get_active_resources
  25. dump_list
  26. display_list
  27. update_scheduler_input
  28. update_scheduler_input_to_cib
  29. update_dataset
  30. max_rsc_stop_timeout
  31. wait_time_estimate
  32. cli_resource_restart
  33. action_is_pending
  34. actions_are_pending
  35. print_pending_actions
  36. wait_till_stable
  37. get_action
  38. set_agent_environment
  39. apply_overrides
  40. cli_resource_execute_from_params
  41. cli_resource_execute
  42. cli_resource_move

   1 /*
   2  * Copyright 2004-2023 the Pacemaker project contributors
   3  *
   4  * The version control history for this file may have further details.
   5  *
   6  * This source code is licensed under the GNU General Public License version 2
   7  * or later (GPLv2+) WITHOUT ANY WARRANTY.
   8  */
   9 
  10 #include <crm_internal.h>
  11 
  12 #include <crm_resource.h>
  13 #include <crm/common/ipc_attrd_internal.h>
  14 #include <crm/common/ipc_controld.h>
  15 #include <crm/common/lists_internal.h>
  16 #include <crm/services_internal.h>
  17 
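      /*!
       * \internal
       * \brief List the nodes where a collective resource's instances are active
       *
       * \param[in] rsc  Resource whose child instances should be checked
       *
       * \return Newly allocated list of node_info_t entries (node name plus
       *         whether the instance running there is promoted)
       * \note The caller is responsible for freeing the result.
       */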
  18 static GList *
  19 build_node_info_list(const pcmk_resource_t *rsc)
  20 {
  21     GList *retval = NULL;
  22 
  23     for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) {
  24         const pcmk_resource_t *child = (const pcmk_resource_t *) iter->data;
  25 
  26         for (const GList *iter2 = child->running_on;
  27              iter2 != NULL; iter2 = iter2->next) {
  28 
  29             const pcmk_node_t *node = (const pcmk_node_t *) iter2->data;
  30             node_info_t *ni = calloc(1, sizeof(node_info_t));
  31 
  32             ni->node_name = node->details->uname;
  33             ni->promoted = pcmk_is_set(rsc->flags, pcmk_rsc_promotable) &&
  34                            child->fns->state(child, TRUE) == pcmk_role_promoted;
  35 
  36             retval = g_list_prepend(retval, ni);
  37         }
  38     }
  39 
  40     return retval;
  41 }
  42 
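      /*!
       * \brief Find the nodes where a resource is currently active
       *
       * \param[in,out] rsc             Resource to check
       * \param[in]     requested_name  Resource name as given by the user
       * \param[in,out] scheduler       Scheduler data
       *
       * \return Newly allocated list of node_info_t entries for each active
       *         instance of \p rsc (anonymous clone instances are matched by
       *         their common name)
       * \note The caller is responsible for freeing the result.
       */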
  43 GList *
  44 cli_resource_search(pcmk_resource_t *rsc, const char *requested_name,
  45                     pcmk_scheduler_t *scheduler)
  46 {
  47     GList *retval = NULL;
  48     const pcmk_resource_t *parent = pe__const_top_resource(rsc, false);
  49 
  50     if (pe_rsc_is_clone(rsc)) {
  51         retval = build_node_info_list(rsc);
  52 
  53     /* The anonymous clone children's common ID is supplied */
  54     } else if (pe_rsc_is_clone(parent)
  55                && !pcmk_is_set(rsc->flags, pcmk_rsc_unique)
  56                && rsc->clone_name
  57                && pcmk__str_eq(requested_name, rsc->clone_name, pcmk__str_casei)
  58                && !pcmk__str_eq(requested_name, rsc->id, pcmk__str_casei)) {
  59 
  60         retval = build_node_info_list(parent);
  61 
  62     } else if (rsc->running_on != NULL) {
  63         for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
  64             pcmk_node_t *node = (pcmk_node_t *) iter->data;
  65             node_info_t *ni = calloc(1, sizeof(node_info_t));
  66             ni->node_name = node->details->uname;
  67             ni->promoted = (rsc->fns->state(rsc, TRUE) == pcmk_role_promoted);
  68 
  69             retval = g_list_prepend(retval, ni);
  70         }
  71     }
  72 
  73     return retval;
  74 }
  75 
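      /* Query the CIB by XPath for an nvpair belonging to a resource and, on a
       * unique match, return the requested XML attribute of that nvpair via
       * *value (listing all matches instead if the name is not unique).
       *
       * For example, looking up meta-attribute "target-role" for a resource
       * with ID "myrsc" builds an XPath roughly like (illustrative only):
       *   .../resources//*[@id="myrsc"]/meta_attributes//nvpair[@name="target-role"]
       */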
  76 // \return Standard Pacemaker return code
  77 static int
  78 find_resource_attr(pcmk__output_t *out, cib_t * the_cib, const char *attr,
  79                    const char *rsc, const char *attr_set_type, const char *set_name,
  80                    const char *attr_id, const char *attr_name, char **value)
  81 {
  82     int rc = pcmk_rc_ok;
  83     xmlNode *xml_search = NULL;
  84     GString *xpath = NULL;
  85     const char *xpath_base = NULL;
  86 
  87     if(value) {
  88         *value = NULL;
  89     }
  90 
  91     if(the_cib == NULL) {
  92         return ENOTCONN;
  93     }
  94 
  95     xpath_base = pcmk_cib_xpath_for(XML_CIB_TAG_RESOURCES);
  96     if (xpath_base == NULL) {
  97         crm_err(XML_CIB_TAG_RESOURCES " CIB element not known (bug?)");
  98         return ENOMSG;
  99     }
 100 
 101     xpath = g_string_sized_new(1024);
 102     pcmk__g_strcat(xpath,
 103                    xpath_base, "//*[@" XML_ATTR_ID "=\"", rsc, "\"]", NULL);
 104 
 105     if (attr_set_type != NULL) {
 106         pcmk__g_strcat(xpath, "/", attr_set_type, NULL);
 107         if (set_name != NULL) {
 108             pcmk__g_strcat(xpath, "[@" XML_ATTR_ID "=\"", set_name, "\"]",
 109                            NULL);
 110         }
 111     }
 112 
 113     g_string_append(xpath, "//" XML_CIB_TAG_NVPAIR "[");
 114     if (attr_id != NULL) {
 115         pcmk__g_strcat(xpath, "@" XML_ATTR_ID "=\"", attr_id, "\"", NULL);
 116     }
 117 
 118     if (attr_name != NULL) {
 119         if (attr_id != NULL) {
 120             g_string_append(xpath, " and ");
 121         }
 122         pcmk__g_strcat(xpath, "@" XML_NVPAIR_ATTR_NAME "=\"", attr_name, "\"",
 123                        NULL);
 124     }
 125     g_string_append_c(xpath, ']');
 126 
 127     rc = the_cib->cmds->query(the_cib, (const char *) xpath->str, &xml_search,
 128                               cib_sync_call | cib_scope_local | cib_xpath);
 129     rc = pcmk_legacy2rc(rc);
 130 
 131     if (rc != pcmk_rc_ok) {
 132         goto done;
 133     }
 134 
 135     crm_log_xml_debug(xml_search, "Match");
 136     if (xml_search->children != NULL) {
 137         xmlNode *child = NULL;
 138 
 139         rc = ENOTUNIQ;
 140         out->info(out, "Multiple attributes match name=%s", attr_name);
 141 
 142         for (child = pcmk__xml_first_child(xml_search); child != NULL;
 143              child = pcmk__xml_next(child)) {
 144             out->info(out, "  Value: %s \t(id=%s)",
 145                       crm_element_value(child, XML_NVPAIR_ATTR_VALUE), ID(child));
 146         }
 147 
 148         out->spacer(out);
 149 
 150     } else if(value) {
 151         pcmk__str_update(value, crm_element_value(xml_search, attr));
 152     }
 153 
 154   done:
 155     g_string_free(xpath, TRUE);
 156     free_xml(xml_search);
 157     return rc;
 158 }
 159 
  160 /* PRIVATE. Use find_matching_attr_resources() instead. */
 161 static void
 162 find_matching_attr_resources_recursive(pcmk__output_t *out,
 163                                        GList /* <pcmk_resource_t*> */ **result,
 164                                        pcmk_resource_t *rsc, const char *rsc_id,
 165                                        const char * attr_set, const char * attr_set_type,
 166                                        const char * attr_id, const char * attr_name,
 167                                        cib_t * cib, const char * cmd, int depth)
 168 {
 169     int rc = pcmk_rc_ok;
 170     char *lookup_id = clone_strip(rsc->id);
 171     char *local_attr_id = NULL;
 172 
 173     /* visit the children */
 174     for(GList *gIter = rsc->children; gIter; gIter = gIter->next) {
 175         find_matching_attr_resources_recursive(out, result,
 176                                                (pcmk_resource_t *) gIter->data,
 177                                                rsc_id, attr_set, attr_set_type,
 178                                                attr_id, attr_name, cib, cmd, depth+1);
 179         /* do it only once for clones */
 180         if (rsc->variant == pcmk_rsc_variant_clone) {
 181             break;
 182         }
 183     }
 184 
 185     rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
 186                             attr_set, attr_id, attr_name, &local_attr_id);
 187     /* Post-order traversal.
 188      * The root is always on the list and it is the last item. */
 189     if((0 == depth) || (pcmk_rc_ok == rc)) {
 190         /* push the head */
 191         *result = g_list_append(*result, rsc);
 192     }
 193 
 194     free(local_attr_id);
 195     free(lookup_id);
 196 }
 197 
 198 
  199 /* The result is a linearized, post-ordered tree of resources (the requested resource is last). */
 200 static GList/*<pcmk_resource_t*>*/ *
 201 find_matching_attr_resources(pcmk__output_t *out, pcmk_resource_t *rsc,
 202                              const char * rsc_id, const char * attr_set,
 203                              const char * attr_set_type, const char * attr_id,
 204                              const char * attr_name, cib_t * cib, const char * cmd,
 205                              gboolean force)
 206 {
 207     int rc = pcmk_rc_ok;
 208     char *lookup_id = NULL;
 209     char *local_attr_id = NULL;
 210     GList * result = NULL;
 211     /* If --force is used, update only the requested resource (clone or primitive).
 212      * Otherwise, if the primitive has the attribute, use that.
 213      * Otherwise use the clone. */
 214     if(force == TRUE) {
 215         return g_list_append(result, rsc);
 216     }
 217     if ((rsc->parent != NULL)
 218         && (rsc->parent->variant == pcmk_rsc_variant_clone)) {
 219         int rc = pcmk_rc_ok;
 220         char *local_attr_id = NULL;
 221         rc = find_resource_attr(out, cib, XML_ATTR_ID, rsc_id, attr_set_type,
 222                                 attr_set, attr_id, attr_name, &local_attr_id);
 223         free(local_attr_id);
 224 
 225         if(rc != pcmk_rc_ok) {
 226             rsc = rsc->parent;
 227             out->info(out, "Performing %s of '%s' on '%s', the parent of '%s'",
 228                       cmd, attr_name, rsc->id, rsc_id);
 229         }
 230         return g_list_append(result, rsc);
 231 
 232     } else if ((rsc->parent == NULL) && (rsc->children != NULL)
 233                && (rsc->variant == pcmk_rsc_variant_clone)) {
 234         pcmk_resource_t *child = rsc->children->data;
 235 
 236         if (child->variant == pcmk_rsc_variant_primitive) {
 237             lookup_id = clone_strip(child->id); /* Could be a cloned group! */
 238             rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
 239                                     attr_set, attr_id, attr_name, &local_attr_id);
 240 
 241             if(rc == pcmk_rc_ok) {
 242                 rsc = child;
 243                 out->info(out, "A value for '%s' already exists in child '%s', performing %s on that instead of '%s'",
 244                           attr_name, lookup_id, cmd, rsc_id);
 245             }
 246 
 247             free(local_attr_id);
 248             free(lookup_id);
 249         }
 250         return g_list_append(result, rsc);
 251     }
 252     /* If the resource is a group ==> children inherit the attribute if defined. */
 253     find_matching_attr_resources_recursive(out, &result, rsc, rsc_id, attr_set,
 254                                            attr_set_type, attr_id, attr_name,
 255                                            cib, cmd, 0);
 256     return result;
 257 }
 258 
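      /* Create or update a resource attribute: a meta-attribute, instance
       * attribute, or (for ATTR_SET_ELEMENT) an attribute on the resource's own
       * XML element, depending on attr_set_type. With recursive, a
       * meta-attribute update is also applied to dependent resources colocated
       * with this one.
       */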
 259 // \return Standard Pacemaker return code
 260 int
 261 cli_resource_update_attribute(pcmk_resource_t *rsc, const char *requested_name,
 262                               const char *attr_set, const char *attr_set_type,
 263                               const char *attr_id, const char *attr_name,
 264                               const char *attr_value, gboolean recursive,
 265                               cib_t *cib, int cib_options, gboolean force)
 266 {
 267     pcmk__output_t *out = rsc->cluster->priv;
 268     int rc = pcmk_rc_ok;
 269 
 270     char *found_attr_id = NULL;
 271 
 272     GList/*<pcmk_resource_t*>*/ *resources = NULL;
 273     const char *top_id = pe__const_top_resource(rsc, false)->id;
 274 
 275     if ((attr_id == NULL) && !force) {
 276         find_resource_attr(out, cib, XML_ATTR_ID, top_id, NULL, NULL, NULL,
 277                            attr_name, NULL);
 278     }
 279 
 280     if (pcmk__str_eq(attr_set_type, XML_TAG_ATTR_SETS, pcmk__str_casei)) {
 281         if (!force) {
 282             rc = find_resource_attr(out, cib, XML_ATTR_ID, top_id,
 283                                     XML_TAG_META_SETS, attr_set, attr_id,
 284                                     attr_name, &found_attr_id);
 285             if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) {
 286                 out->err(out,
 287                          "WARNING: There is already a meta attribute "
 288                          "for '%s' called '%s' (id=%s)",
 289                          top_id, attr_name, found_attr_id);
 290                 out->err(out,
 291                          "         Delete '%s' first or use the force option "
 292                          "to override", found_attr_id);
 293             }
 294             free(found_attr_id);
 295             if (rc == pcmk_rc_ok) {
 296                 return ENOTUNIQ;
 297             }
 298         }
 299         resources = g_list_append(resources, rsc);
 300 
 301     } else if (pcmk__str_eq(attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) {
 302         crm_xml_add(rsc->xml, attr_name, attr_value);
 303         CRM_ASSERT(cib != NULL);
 304         rc = cib->cmds->replace(cib, XML_CIB_TAG_RESOURCES, rsc->xml,
 305                                 cib_options);
 306         rc = pcmk_legacy2rc(rc);
 307         if (rc == pcmk_rc_ok) {
 308             out->info(out, "Set attribute: name=%s value=%s",
 309                       attr_name, attr_value);
 310         }
 311         return rc;
 312 
 313     } else {
 314         resources = find_matching_attr_resources(out, rsc, requested_name,
 315                                                  attr_set, attr_set_type,
 316                                                  attr_id, attr_name, cib,
 317                                                  "update", force);
 318     }
 319 
 320     /* If the user specified attr_set or attr_id, the intent is to modify a
 321      * single resource, which will be the last item in the list.
 322      */
 323     if ((attr_set != NULL) || (attr_id != NULL)) {
 324         GList *last = g_list_last(resources);
 325 
 326         resources = g_list_remove_link(resources, last);
 327         g_list_free(resources);
 328         resources = last;
 329     }
 330 
 331     for (GList *iter = resources; iter != NULL; iter = iter->next) {
 332         char *lookup_id = NULL;
 333         char *local_attr_set = NULL;
 334         const char *rsc_attr_id = attr_id;
 335         const char *rsc_attr_set = attr_set;
 336 
 337         xmlNode *xml_top = NULL;
 338         xmlNode *xml_obj = NULL;
 339         found_attr_id = NULL;
 340 
 341         rsc = (pcmk_resource_t *) iter->data;
 342 
 343         lookup_id = clone_strip(rsc->id); /* Could be a cloned group! */
 344         rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
 345                                 attr_set, attr_id, attr_name, &found_attr_id);
 346 
 347         switch (rc) {
 348             case pcmk_rc_ok:
 349                 crm_debug("Found a match for name=%s: id=%s",
 350                           attr_name, found_attr_id);
 351                 rsc_attr_id = found_attr_id;
 352                 break;
 353 
 354             case ENXIO:
 355                 if (rsc_attr_set == NULL) {
 356                     local_attr_set = crm_strdup_printf("%s-%s", lookup_id,
 357                                                        attr_set_type);
 358                     rsc_attr_set = local_attr_set;
 359                 }
 360                 if (rsc_attr_id == NULL) {
 361                     found_attr_id = crm_strdup_printf("%s-%s",
 362                                                       rsc_attr_set, attr_name);
 363                     rsc_attr_id = found_attr_id;
 364                 }
 365 
 366                 xml_top = create_xml_node(NULL, (const char *) rsc->xml->name);
 367                 crm_xml_add(xml_top, XML_ATTR_ID, lookup_id);
 368 
 369                 xml_obj = create_xml_node(xml_top, attr_set_type);
 370                 crm_xml_add(xml_obj, XML_ATTR_ID, rsc_attr_set);
 371                 break;
 372 
 373             default:
 374                 free(lookup_id);
 375                 free(found_attr_id);
 376                 g_list_free(resources);
 377                 return rc;
 378         }
 379 
 380         xml_obj = crm_create_nvpair_xml(xml_obj, rsc_attr_id, attr_name,
 381                                         attr_value);
 382         if (xml_top == NULL) {
 383             xml_top = xml_obj;
 384         }
 385 
 386         crm_log_xml_debug(xml_top, "Update");
 387 
 388         rc = cib->cmds->modify(cib, XML_CIB_TAG_RESOURCES, xml_top,
 389                                cib_options);
 390         rc = pcmk_legacy2rc(rc);
 391         if (rc == pcmk_rc_ok) {
 392             out->info(out, "Set '%s' option: id=%s%s%s%s%s value=%s",
 393                       lookup_id, found_attr_id,
 394                       ((rsc_attr_set == NULL)? "" : " set="),
 395                       pcmk__s(rsc_attr_set, ""),
 396                       ((attr_name == NULL)? "" : " name="),
 397                       pcmk__s(attr_name, ""), attr_value);
 398         }
 399 
 400         free_xml(xml_top);
 401 
 402         free(lookup_id);
 403         free(found_attr_id);
 404         free(local_attr_set);
 405 
 406         if (recursive
 407             && pcmk__str_eq(attr_set_type, XML_TAG_META_SETS,
 408                             pcmk__str_casei)) {
 409             GList *lpc = NULL;
 410             static bool need_init = true;
 411 
 412             if (need_init) {
 413                 need_init = false;
 414                 pcmk__unpack_constraints(rsc->cluster);
 415                 pe__clear_resource_flags_on_all(rsc->cluster,
 416                                                 pcmk_rsc_detect_loop);
 417             }
 418 
 419             /* We want to set the attribute only on resources explicitly
 420              * colocated with this one, so we use rsc->rsc_cons_lhs directly
 421              * rather than the with_this_colocations() method.
 422              */
 423             pe__set_resource_flags(rsc, pcmk_rsc_detect_loop);
 424             for (lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) {
 425                 pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data;
 426 
 427                 crm_debug("Checking %s %d", cons->id, cons->score);
 428                 if (!pcmk_is_set(cons->dependent->flags, pcmk_rsc_detect_loop)
 429                     && (cons->score > 0)) {
 430                     crm_debug("Setting %s=%s for dependent resource %s",
 431                               attr_name, attr_value, cons->dependent->id);
 432                     cli_resource_update_attribute(cons->dependent,
 433                                                   cons->dependent->id, NULL,
 434                                                   attr_set_type, NULL,
 435                                                   attr_name, attr_value,
 436                                                   recursive, cib, cib_options,
 437                                                   force);
 438                 }
 439             }
 440         }
 441     }
 442     g_list_free(resources);
 443     return rc;
 444 }
 445 
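      /* Delete a resource attribute: a meta-attribute, instance attribute, or
       * (for ATTR_SET_ELEMENT) an attribute on the resource's own XML element,
       * depending on attr_set_type.
       */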
 446 // \return Standard Pacemaker return code
 447 int
 448 cli_resource_delete_attribute(pcmk_resource_t *rsc, const char *requested_name,
 449                               const char *attr_set, const char *attr_set_type,
 450                               const char *attr_id, const char *attr_name,
 451                               cib_t *cib, int cib_options, gboolean force)
 452 {
 453     pcmk__output_t *out = rsc->cluster->priv;
 454     int rc = pcmk_rc_ok;
 455     GList/*<pcmk_resource_t*>*/ *resources = NULL;
 456 
 457     if ((attr_id == NULL) && !force) {
 458         find_resource_attr(out, cib, XML_ATTR_ID,
 459                            pe__const_top_resource(rsc, false)->id, NULL,
 460                            NULL, NULL, attr_name, NULL);
 461     }
 462 
 463     if (pcmk__str_eq(attr_set_type, XML_TAG_META_SETS, pcmk__str_casei)) {
 464         resources = find_matching_attr_resources(out, rsc, requested_name,
 465                                                  attr_set, attr_set_type,
 466                                                  attr_id, attr_name, cib,
 467                                                  "delete", force);
 468 
 469     } else if (pcmk__str_eq(attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) {
 470         xml_remove_prop(rsc->xml, attr_name);
 471         CRM_ASSERT(cib != NULL);
 472         rc = cib->cmds->replace(cib, XML_CIB_TAG_RESOURCES, rsc->xml,
 473                                 cib_options);
 474         rc = pcmk_legacy2rc(rc);
 475         if (rc == pcmk_rc_ok) {
 476             out->info(out, "Deleted attribute: %s", attr_name);
 477         }
 478         return rc;
 479 
 480     } else {
 481         resources = g_list_append(resources, rsc);
 482     }
 483 
 484     for (GList *iter = resources; iter != NULL; iter = iter->next) {
 485         char *lookup_id = NULL;
 486         xmlNode *xml_obj = NULL;
 487         char *found_attr_id = NULL;
 488         const char *rsc_attr_id = attr_id;
 489 
 490         rsc = (pcmk_resource_t *) iter->data;
 491 
 492         lookup_id = clone_strip(rsc->id);
 493         rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
 494                                 attr_set, attr_id, attr_name, &found_attr_id);
 495         switch (rc) {
 496             case pcmk_rc_ok:
 497                 break;
 498 
 499             case ENXIO:
 500                 free(lookup_id);
 501                 rc = pcmk_rc_ok;
 502                 continue;
 503 
 504             default:
 505                 free(lookup_id);
 506                 g_list_free(resources);
 507                 return rc;
 508         }
 509 
 510         if (rsc_attr_id == NULL) {
 511             rsc_attr_id = found_attr_id;
 512         }
 513 
 514         xml_obj = crm_create_nvpair_xml(NULL, rsc_attr_id, attr_name, NULL);
 515         crm_log_xml_debug(xml_obj, "Delete");
 516 
 517         CRM_ASSERT(cib);
 518         rc = cib->cmds->remove(cib, XML_CIB_TAG_RESOURCES, xml_obj,
 519                                cib_options);
 520         rc = pcmk_legacy2rc(rc);
 521 
 522         if (rc == pcmk_rc_ok) {
 523             out->info(out, "Deleted '%s' option: id=%s%s%s%s%s",
 524                       lookup_id, found_attr_id,
 525                       ((attr_set == NULL)? "" : " set="),
 526                       pcmk__s(attr_set, ""),
 527                       ((attr_name == NULL)? "" : " name="),
 528                       pcmk__s(attr_name, ""));
 529         }
 530 
 531         free(lookup_id);
 532         free_xml(xml_obj);
 533         free(found_attr_id);
 534     }
 535     g_list_free(resources);
 536     return rc;
 537 }
 538 
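      /* Ask the controller to fail the given resource on the given node (if
       * do_fail_resource is true) or to clean up its operation history and
       * re-probe it (if false).
       */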
 539 // \return Standard Pacemaker return code
 540 static int
 541 send_lrm_rsc_op(pcmk_ipc_api_t *controld_api, bool do_fail_resource,
 542                 const char *host_uname, const char *rsc_id,
 543                 pcmk_scheduler_t *scheduler)
 544 {
 545     pcmk__output_t *out = scheduler->priv;
 546     const char *router_node = host_uname;
 547     const char *rsc_api_id = NULL;
 548     const char *rsc_long_id = NULL;
 549     const char *rsc_class = NULL;
 550     const char *rsc_provider = NULL;
 551     const char *rsc_type = NULL;
 552     bool cib_only = false;
 553     pcmk_resource_t *rsc = pe_find_resource(scheduler->resources, rsc_id);
 554 
 555     if (rsc == NULL) {
 556         out->err(out, "Resource %s not found", rsc_id);
 557         return ENXIO;
 558 
 559     } else if (rsc->variant != pcmk_rsc_variant_primitive) {
 560         out->err(out, "We can only process primitive resources, not %s", rsc_id);
 561         return EINVAL;
 562     }
 563 
 564     rsc_class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
  565     rsc_provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
 566     rsc_type = crm_element_value(rsc->xml, XML_ATTR_TYPE);
 567     if ((rsc_class == NULL) || (rsc_type == NULL)) {
 568         out->err(out, "Resource %s does not have a class and type", rsc_id);
 569         return EINVAL;
 570     }
 571 
 572     {
 573         pcmk_node_t *node = pe_find_node(scheduler->nodes, host_uname);
 574 
 575         if (node == NULL) {
 576             out->err(out, "Node %s not found", host_uname);
 577             return pcmk_rc_node_unknown;
 578         }
 579 
 580         if (!(node->details->online)) {
 581             if (do_fail_resource) {
 582                 out->err(out, "Node %s is not online", host_uname);
 583                 return ENOTCONN;
 584             } else {
 585                 cib_only = true;
 586             }
 587         }
 588         if (!cib_only && pe__is_guest_or_remote_node(node)) {
 589             node = pe__current_node(node->details->remote_rsc);
 590             if (node == NULL) {
 591                 out->err(out, "No cluster connection to Pacemaker Remote node %s detected",
 592                          host_uname);
 593                 return ENOTCONN;
 594             }
 595             router_node = node->details->uname;
 596         }
 597     }
 598 
 599     if (rsc->clone_name) {
 600         rsc_api_id = rsc->clone_name;
 601         rsc_long_id = rsc->id;
 602     } else {
 603         rsc_api_id = rsc->id;
 604     }
 605     if (do_fail_resource) {
 606         return pcmk_controld_api_fail(controld_api, host_uname, router_node,
 607                                       rsc_api_id, rsc_long_id,
 608                                       rsc_class, rsc_provider, rsc_type);
 609     } else {
 610         return pcmk_controld_api_refresh(controld_api, host_uname, router_node,
 611                                          rsc_api_id, rsc_long_id, rsc_class,
 612                                          rsc_provider, rsc_type, cib_only);
 613     }
 614 }
 615 
 616 /*!
 617  * \internal
 618  * \brief Get resource name as used in failure-related node attributes
 619  *
 620  * \param[in] rsc  Resource to check
 621  *
 622  * \return Newly allocated string containing resource's fail name
 623  * \note The caller is responsible for freeing the result.
 624  */
 625 static inline char *
 626 rsc_fail_name(const pcmk_resource_t *rsc)
 627 {
 628     const char *name = (rsc->clone_name? rsc->clone_name : rsc->id);
 629 
 630     if (pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
 631         return strdup(name);
 632     }
 633     return clone_strip(name);
 634 }
 635 
 636 // \return Standard Pacemaker return code
 637 static int
 638 clear_rsc_history(pcmk_ipc_api_t *controld_api, const char *host_uname,
 639                   const char *rsc_id, pcmk_scheduler_t *scheduler)
 640 {
 641     int rc = pcmk_rc_ok;
 642 
 643     /* Erase the resource's entire LRM history in the CIB, even if we're only
 644      * clearing a single operation's fail count. If we erased only entries for a
 645      * single operation, we might wind up with a wrong idea of the current
 646      * resource state, and we might not re-probe the resource.
 647      */
 648     rc = send_lrm_rsc_op(controld_api, false, host_uname, rsc_id, scheduler);
 649     if (rc != pcmk_rc_ok) {
 650         return rc;
 651     }
 652 
 653     crm_trace("Processing %d mainloop inputs",
 654               pcmk_controld_api_replies_expected(controld_api));
 655     while (g_main_context_iteration(NULL, FALSE)) {
 656         crm_trace("Processed mainloop input, %d still remaining",
 657                   pcmk_controld_api_replies_expected(controld_api));
 658     }
 659     return rc;
 660 }
 661 
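      /* Clear failed-operation history entries matching the given node,
       * resource, operation, and interval, cleaning each affected resource at
       * most once.
       */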
 662 // \return Standard Pacemaker return code
 663 static int
 664 clear_rsc_failures(pcmk__output_t *out, pcmk_ipc_api_t *controld_api,
 665                    const char *node_name, const char *rsc_id, const char *operation,
 666                    const char *interval_spec, pcmk_scheduler_t *scheduler)
 667 {
 668     int rc = pcmk_rc_ok;
 669     const char *failed_value = NULL;
 670     const char *failed_id = NULL;
 671     const char *interval_ms_s = NULL;
 672     GHashTable *rscs = NULL;
 673     GHashTableIter iter;
 674 
 675     /* Create a hash table to use as a set of resources to clean. This lets us
 676      * clean each resource only once (per node) regardless of how many failed
 677      * operations it has.
 678      */
 679     rscs = pcmk__strkey_table(NULL, NULL);
 680 
 681     // Normalize interval to milliseconds for comparison to history entry
 682     if (operation) {
 683         interval_ms_s = crm_strdup_printf("%u",
 684                                           crm_parse_interval_spec(interval_spec));
 685     }
 686 
 687     for (xmlNode *xml_op = pcmk__xml_first_child(scheduler->failed);
 688          xml_op != NULL;
 689          xml_op = pcmk__xml_next(xml_op)) {
 690 
 691         failed_id = crm_element_value(xml_op, XML_LRM_ATTR_RSCID);
 692         if (failed_id == NULL) {
 693             // Malformed history entry, should never happen
 694             continue;
 695         }
 696 
 697         // No resource specified means all resources match
 698         if (rsc_id) {
 699             pcmk_resource_t *fail_rsc = NULL;
 700 
 701             fail_rsc = pe_find_resource_with_flags(scheduler->resources,
 702                                                    failed_id,
 703                                                    pcmk_rsc_match_history
 704                                                    |pcmk_rsc_match_anon_basename);
 705             if (!fail_rsc || !pcmk__str_eq(rsc_id, fail_rsc->id, pcmk__str_casei)) {
 706                 continue;
 707             }
 708         }
 709 
 710         // Host name should always have been provided by this point
 711         failed_value = crm_element_value(xml_op, XML_ATTR_UNAME);
 712         if (!pcmk__str_eq(node_name, failed_value, pcmk__str_casei)) {
 713             continue;
 714         }
 715 
 716         // No operation specified means all operations match
 717         if (operation) {
 718             failed_value = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
 719             if (!pcmk__str_eq(operation, failed_value, pcmk__str_casei)) {
 720                 continue;
 721             }
 722 
 723             // Interval (if operation was specified) defaults to 0 (not all)
 724             failed_value = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS);
 725             if (!pcmk__str_eq(interval_ms_s, failed_value, pcmk__str_casei)) {
 726                 continue;
 727             }
 728         }
 729 
 730         g_hash_table_add(rscs, (gpointer) failed_id);
 731     }
 732 
 733     g_hash_table_iter_init(&iter, rscs);
 734     while (g_hash_table_iter_next(&iter, (gpointer *) &failed_id, NULL)) {
 735         crm_debug("Erasing failures of %s on %s", failed_id, node_name);
 736         rc = clear_rsc_history(controld_api, node_name, failed_id, scheduler);
 737         if (rc != pcmk_rc_ok) {
 738             return rc;
 739         }
 740     }
 741     g_hash_table_destroy(rscs);
 742     return rc;
 743 }
 744 
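      // Clear the resource's fail-count and last-failure node attributes
      // (optionally limited to one operation and interval) on the given node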
 745 // \return Standard Pacemaker return code
 746 static int
 747 clear_rsc_fail_attrs(const pcmk_resource_t *rsc, const char *operation,
 748                      const char *interval_spec, const pcmk_node_t *node)
 749 {
 750     int rc = pcmk_rc_ok;
 751     int attr_options = pcmk__node_attr_none;
 752     char *rsc_name = rsc_fail_name(rsc);
 753 
 754     if (pe__is_guest_or_remote_node(node)) {
 755         attr_options |= pcmk__node_attr_remote;
 756     }
 757 
 758     rc = pcmk__attrd_api_clear_failures(NULL, node->details->uname, rsc_name,
 759                                         operation, interval_spec, NULL,
 760                                         attr_options);
 761     free(rsc_name);
 762     return rc;
 763 }
 764 
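      /* Clean up a resource on the given node (or on all nodes where it is
       * known or allowed, if host_uname is NULL): clear its failure attributes
       * and then either just its failures or its entire operation history.
       * Collective resources are handled by recursing into their children.
       */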
 765 // \return Standard Pacemaker return code
 766 int
 767 cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname,
 768                     const pcmk_resource_t *rsc, const char *operation,
 769                     const char *interval_spec, bool just_failures,
 770                     pcmk_scheduler_t *scheduler, gboolean force)
 771 {
 772     pcmk__output_t *out = scheduler->priv;
 773     int rc = pcmk_rc_ok;
 774     pcmk_node_t *node = NULL;
 775 
 776     if (rsc == NULL) {
 777         return ENXIO;
 778 
 779     } else if (rsc->children) {
 780 
 781         for (const GList *lpc = rsc->children; lpc != NULL; lpc = lpc->next) {
 782             const pcmk_resource_t *child = (const pcmk_resource_t *) lpc->data;
 783 
 784             rc = cli_resource_delete(controld_api, host_uname, child, operation,
 785                                      interval_spec, just_failures, scheduler,
 786                                      force);
 787             if (rc != pcmk_rc_ok) {
 788                 return rc;
 789             }
 790         }
 791         return pcmk_rc_ok;
 792 
 793     } else if (host_uname == NULL) {
 794         GList *lpc = NULL;
 795         GList *nodes = g_hash_table_get_values(rsc->known_on);
 796 
 797         if(nodes == NULL && force) {
 798             nodes = pcmk__copy_node_list(scheduler->nodes, false);
 799 
 800         } else if(nodes == NULL && rsc->exclusive_discover) {
 801             GHashTableIter iter;
 802             pcmk_node_t *node = NULL;
 803 
 804             g_hash_table_iter_init(&iter, rsc->allowed_nodes);
 805             while (g_hash_table_iter_next(&iter, NULL, (void**)&node)) {
 806                 if(node->weight >= 0) {
 807                     nodes = g_list_prepend(nodes, node);
 808                 }
 809             }
 810 
 811         } else if(nodes == NULL) {
 812             nodes = g_hash_table_get_values(rsc->allowed_nodes);
 813         }
 814 
 815         for (lpc = nodes; lpc != NULL; lpc = lpc->next) {
 816             node = (pcmk_node_t *) lpc->data;
 817 
 818             if (node->details->online) {
 819                 rc = cli_resource_delete(controld_api, node->details->uname, rsc,
 820                                          operation, interval_spec, just_failures,
 821                                          scheduler, force);
 822             }
 823             if (rc != pcmk_rc_ok) {
 824                 g_list_free(nodes);
 825                 return rc;
 826             }
 827         }
 828 
 829         g_list_free(nodes);
 830         return pcmk_rc_ok;
 831     }
 832 
 833     node = pe_find_node(scheduler->nodes, host_uname);
 834 
 835     if (node == NULL) {
 836         out->err(out, "Unable to clean up %s because node %s not found",
 837                  rsc->id, host_uname);
 838         return ENODEV;
 839     }
 840 
 841     if (!node->details->rsc_discovery_enabled) {
 842         out->err(out, "Unable to clean up %s because resource discovery disabled on %s",
 843                  rsc->id, host_uname);
 844         return EOPNOTSUPP;
 845     }
 846 
 847     if (controld_api == NULL) {
 848         out->err(out, "Dry run: skipping clean-up of %s on %s due to CIB_file",
 849                  rsc->id, host_uname);
 850         return pcmk_rc_ok;
 851     }
 852 
 853     rc = clear_rsc_fail_attrs(rsc, operation, interval_spec, node);
 854     if (rc != pcmk_rc_ok) {
 855         out->err(out, "Unable to clean up %s failures on %s: %s",
 856                  rsc->id, host_uname, pcmk_rc_str(rc));
 857         return rc;
 858     }
 859 
 860     if (just_failures) {
 861         rc = clear_rsc_failures(out, controld_api, host_uname, rsc->id, operation,
 862                                 interval_spec, scheduler);
 863     } else {
 864         rc = clear_rsc_history(controld_api, host_uname, rsc->id, scheduler);
 865     }
 866     if (rc != pcmk_rc_ok) {
 867         out->err(out, "Cleaned %s failures on %s, but unable to clean history: %s",
 868                  rsc->id, host_uname, pcmk_rc_str(rc));
 869     } else {
 870         out->info(out, "Cleaned up %s on %s", rsc->id, host_uname);
 871     }
 872     return rc;
 873 }
 874 
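      /* Clean up all resource failures (failure attributes plus failed-operation
       * history) on one node, or on every node if node_name is NULL.
       */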
 875 // \return Standard Pacemaker return code
 876 int
 877 cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name,
 878                 const char *operation, const char *interval_spec,
 879                 pcmk_scheduler_t *scheduler)
 880 {
 881     pcmk__output_t *out = scheduler->priv;
 882     int rc = pcmk_rc_ok;
 883     int attr_options = pcmk__node_attr_none;
 884     const char *display_name = node_name? node_name : "all nodes";
 885 
 886     if (controld_api == NULL) {
 887         out->info(out, "Dry run: skipping clean-up of %s due to CIB_file",
 888                   display_name);
 889         return rc;
 890     }
 891 
 892     if (node_name) {
 893         pcmk_node_t *node = pe_find_node(scheduler->nodes, node_name);
 894 
 895         if (node == NULL) {
 896             out->err(out, "Unknown node: %s", node_name);
 897             return ENXIO;
 898         }
 899         if (pe__is_guest_or_remote_node(node)) {
 900             attr_options |= pcmk__node_attr_remote;
 901         }
 902     }
 903 
 904     rc = pcmk__attrd_api_clear_failures(NULL, node_name, NULL, operation,
 905                                         interval_spec, NULL, attr_options);
 906     if (rc != pcmk_rc_ok) {
 907         out->err(out, "Unable to clean up all failures on %s: %s",
 908                  display_name, pcmk_rc_str(rc));
 909         return rc;
 910     }
 911 
 912     if (node_name) {
 913         rc = clear_rsc_failures(out, controld_api, node_name, NULL,
 914                                 operation, interval_spec, scheduler);
 915         if (rc != pcmk_rc_ok) {
 916             out->err(out, "Cleaned all resource failures on %s, but unable to clean history: %s",
 917                      node_name, pcmk_rc_str(rc));
 918             return rc;
 919         }
 920     } else {
 921         for (GList *iter = scheduler->nodes; iter; iter = iter->next) {
 922             pcmk_node_t *node = (pcmk_node_t *) iter->data;
 923 
 924             rc = clear_rsc_failures(out, controld_api, node->details->uname, NULL,
 925                                     operation, interval_spec, scheduler);
 926             if (rc != pcmk_rc_ok) {
 927                 out->err(out, "Cleaned all resource failures on all nodes, but unable to clean history: %s",
 928                          pcmk_rc_str(rc));
 929                 return rc;
 930             }
 931         }
 932     }
 933 
 934     out->info(out, "Cleaned up all resources on %s", display_name);
 935     return rc;
 936 }
 937 
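      // Flag in checks whether target-role keeps the resource stopped or unpromoted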
 938 static void
 939 check_role(resource_checks_t *checks)
 940 {
 941     const char *role_s = g_hash_table_lookup(checks->rsc->meta,
 942                                              XML_RSC_ATTR_TARGET_ROLE);
 943 
 944     if (role_s == NULL) {
 945         return;
 946     }
 947     switch (text2role(role_s)) {
 948         case pcmk_role_stopped:
 949             checks->flags |= rsc_remain_stopped;
 950             break;
 951 
 952         case pcmk_role_unpromoted:
 953             if (pcmk_is_set(pe__const_top_resource(checks->rsc, false)->flags,
 954                             pcmk_rsc_promotable)) {
 955                 checks->flags |= rsc_unpromotable;
 956             }
 957             break;
 958 
 959         default:
 960             break;
 961     }
 962 }
 963 
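      // Flag in checks whether the resource is unmanaged (is-managed set to false)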
 964 static void
 965 check_managed(resource_checks_t *checks)
 966 {
 967     const char *managed_s = g_hash_table_lookup(checks->rsc->meta,
 968                                                 XML_RSC_ATTR_MANAGED);
 969 
 970     if ((managed_s != NULL) && !crm_is_true(managed_s)) {
 971         checks->flags |= rsc_unmanaged;
 972     }
 973 }
 974 
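      // Flag in checks whether the resource is shutdown-locked, and record the lock node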
 975 static void
 976 check_locked(resource_checks_t *checks)
 977 {
 978     if (checks->rsc->lock_node != NULL) {
 979         checks->flags |= rsc_locked;
 980         checks->lock_node = checks->rsc->lock_node->details->uname;
 981     }
 982 }
 983 
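      // Check whether a node is unhealthy enough that resources should avoid it
      // (only the simpler node health strategies are attributed here)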
 984 static bool
 985 node_is_unhealthy(pcmk_node_t *node)
 986 {
 987     switch (pe__health_strategy(node->details->data_set)) {
 988         case pcmk__health_strategy_none:
 989             break;
 990 
 991         case pcmk__health_strategy_no_red:
 992             if (pe__node_health(node) < 0) {
 993                 return true;
 994             }
 995             break;
 996 
 997         case pcmk__health_strategy_only_green:
 998             if (pe__node_health(node) <= 0) {
 999                 return true;
1000             }
1001             break;
1002 
1003         case pcmk__health_strategy_progressive:
1004         case pcmk__health_strategy_custom:
1005             /* @TODO These are finite scores, possibly with rules, and possibly
1006              * combining with other scores, so attributing these as a cause is
1007              * nontrivial.
1008              */
1009             break;
1010     }
1011     return false;
1012 }
1013 
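      /* Flag in checks whether node health prevents the resource from running:
       * on the given node, or (if node is NULL) on all of its allowed nodes.
       */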
1014 static void
1015 check_node_health(resource_checks_t *checks, pcmk_node_t *node)
1016 {
1017     if (node == NULL) {
1018         GHashTableIter iter;
1019         bool allowed = false;
1020         bool all_nodes_unhealthy = true;
1021 
1022         g_hash_table_iter_init(&iter, checks->rsc->allowed_nodes);
1023         while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
1024             allowed = true;
1025             if (!node_is_unhealthy(node)) {
1026                 all_nodes_unhealthy = false;
1027                 break;
1028             }
1029         }
1030         if (allowed && all_nodes_unhealthy) {
1031             checks->flags |= rsc_node_health;
1032         }
1033 
1034     } else if (node_is_unhealthy(node)) {
1035         checks->flags |= rsc_node_health;
1036     }
1037 }
1038 
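      // Check for and report reasons (target-role, unmanaged, shutdown lock,
      // node health) why a resource might not be operating as intended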
1039 int
1040 cli_resource_check(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node)
1041 {
1042     resource_checks_t checks = { .rsc = rsc };
1043 
1044     check_role(&checks);
1045     check_managed(&checks);
1046     check_locked(&checks);
1047     check_node_health(&checks, node);
1048 
1049     return out->message(out, "resource-check-list", &checks);
1050 }
1051 
1052 // \return Standard Pacemaker return code
1053 int
1054 cli_resource_fail(pcmk_ipc_api_t *controld_api, const char *host_uname,
1055                   const char *rsc_id, pcmk_scheduler_t *scheduler)
1056 {
1057     crm_notice("Failing %s on %s", rsc_id, host_uname);
1058     return send_lrm_rsc_op(controld_api, true, host_uname, rsc_id, scheduler);
1059 }
1060 
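      /* Combine a resource's instance parameters and meta-attributes (meta keys
       * mapped through crm_meta_name()) into one newly allocated table, which
       * the caller must free with g_hash_table_destroy().
       */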
1061 static GHashTable *
1062 generate_resource_params(pcmk_resource_t *rsc, pcmk_node_t *node,
1063                          pcmk_scheduler_t *scheduler)
1064 {
1065     GHashTable *params = NULL;
1066     GHashTable *meta = NULL;
1067     GHashTable *combined = NULL;
1068     GHashTableIter iter;
1069     char *key = NULL;
1070     char *value = NULL;
1071 
1072     combined = pcmk__strkey_table(free, free);
1073 
1074     params = pe_rsc_params(rsc, node, scheduler);
1075     if (params != NULL) {
1076         g_hash_table_iter_init(&iter, params);
1077         while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
1078             g_hash_table_insert(combined, strdup(key), strdup(value));
1079         }
1080     }
1081 
1082     meta = pcmk__strkey_table(free, free);
1083     get_meta_attributes(meta, rsc, node, scheduler);
1084     if (meta != NULL) {
1085         g_hash_table_iter_init(&iter, meta);
1086         while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
1087             char *crm_name = crm_meta_name(key);
1088 
1089             g_hash_table_insert(combined, crm_name, strdup(value));
1090         }
1091         g_hash_table_destroy(meta);
1092     }
1093 
1094     return combined;
1095 }
1096 
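      /* Check whether a resource is active on the named host, or active
       * anywhere if host is NULL. For example (illustrative only):
       *
       *   if (resource_is_running_on(rsc, "node1")) {
       *       // rsc has an active instance on node1
       *   }
       */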
1097 bool resource_is_running_on(pcmk_resource_t *rsc, const char *host)
1098 {
1099     bool found = true;
1100     GList *hIter = NULL;
1101     GList *hosts = NULL;
1102 
1103     if (rsc == NULL) {
1104         return false;
1105     }
1106 
1107     rsc->fns->location(rsc, &hosts, TRUE);
1108     for (hIter = hosts; host != NULL && hIter != NULL; hIter = hIter->next) {
1109         pcmk_node_t *node = (pcmk_node_t *) hIter->data;
1110 
1111         if (pcmk__strcase_any_of(host, node->details->uname, node->details->id, NULL)) {
1112             crm_trace("Resource %s is running on %s\n", rsc->id, host);
1113             goto done;
1114         }
1115     }
1116 
1117     if (host != NULL) {
1118         crm_trace("Resource %s is not running on: %s\n", rsc->id, host);
1119         found = false;
1120 
1121     } else if(host == NULL && hosts == NULL) {
1122         crm_trace("Resource %s is not running\n", rsc->id);
1123         found = false;
1124     }
1125 
1126   done:
1127     g_list_free(hosts);
1128     return found;
1129 }
1130 
1131 /*!
1132  * \internal
1133  * \brief Create a list of all resources active on host from a given list
1134  *
1135  * \param[in] host      Name of host to check whether resources are active
1136  * \param[in] rsc_list  List of resources to check
1137  *
1138  * \return New list of resources from list that are active on host
1139  */
1140 static GList *
1141 get_active_resources(const char *host, GList *rsc_list)
1142 {
1143     GList *rIter = NULL;
1144     GList *active = NULL;
1145 
1146     for (rIter = rsc_list; rIter != NULL; rIter = rIter->next) {
1147         pcmk_resource_t *rsc = (pcmk_resource_t *) rIter->data;
1148 
1149         /* Expand groups to their members, because if we're restarting a member
1150          * other than the first, we can't otherwise tell which resources are
1151          * stopping and starting.
1152          */
1153         if (rsc->variant == pcmk_rsc_variant_group) {
1154             active = g_list_concat(active,
1155                                    get_active_resources(host, rsc->children));
1156         } else if (resource_is_running_on(rsc, host)) {
1157             active = g_list_append(active, strdup(rsc->id));
1158         }
1159     }
1160     return active;
1161 }
1162 
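      // Log each entry of a list of strings at trace level, prefixed with tag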
1163 static void dump_list(GList *items, const char *tag)
1164 {
1165     int lpc = 0;
1166     GList *item = NULL;
1167 
1168     for (item = items; item != NULL; item = item->next) {
1169         crm_trace("%s[%d]: %s", tag, lpc, (char*)item->data);
1170         lpc++;
1171     }
1172 }
1173 
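      // Print each entry of a list of strings via the output object, prefixed with tag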
1174 static void display_list(pcmk__output_t *out, GList *items, const char *tag)
1175 {
1176     GList *item = NULL;
1177 
1178     for (item = items; item != NULL; item = item->next) {
1179         out->info(out, "%s%s", tag, (const char *)item->data);
1180     }
1181 }
1182 
1183 /*!
1184  * \internal
1185  * \brief Upgrade XML to latest schema version and use it as scheduler input
1186  *
1187  * This also updates the scheduler timestamp to the current time.
1188  *
1189  * \param[in,out] scheduler  Scheduler data to update
1190  * \param[in,out] xml        XML to use as input
1191  *
1192  * \return Standard Pacemaker return code
1193  * \note On success, caller is responsible for freeing memory allocated for
1194  *       scheduler->now.
1195  * \todo This follows the example of other callers of cli_config_update()
1196  *       and returns ENOKEY ("Required key not available") if that fails,
1197  *       but perhaps pcmk_rc_schema_validation would be better in that case.
1198  */
1199 int
1200 update_scheduler_input(pcmk_scheduler_t *scheduler, xmlNode **xml)
1201 {
1202     if (cli_config_update(xml, NULL, FALSE) == FALSE) {
1203         return ENOKEY;
1204     }
1205     scheduler->input = *xml;
1206     scheduler->now = crm_time_new(NULL);
1207     return pcmk_rc_ok;
1208 }
1209 
1210 /*!
1211  * \internal
1212  * \brief Update scheduler XML input based on a CIB query
1213  *
1214  * \param[in] scheduler  Scheduler data to initialize
1215  * \param[in] cib        Connection to the CIB manager
1216  *
1217  * \return Standard Pacemaker return code
1218  * \note On success, caller is responsible for freeing memory allocated for
1219  *       scheduler->input and scheduler->now.
1220  */
1221 static int
1222 update_scheduler_input_to_cib(pcmk__output_t *out, pcmk_scheduler_t *scheduler,
1223                               cib_t *cib)
1224 {
1225     xmlNode *cib_xml_copy = NULL;
1226     int rc = pcmk_rc_ok;
1227 
1228     rc = cib->cmds->query(cib, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call);
1229     rc = pcmk_legacy2rc(rc);
1230 
1231     if (rc != pcmk_rc_ok) {
1232         out->err(out, "Could not obtain the current CIB: %s (%d)", pcmk_rc_str(rc), rc);
1233         return rc;
1234     }
1235     rc = update_scheduler_input(scheduler, &cib_xml_copy);
1236     if (rc != pcmk_rc_ok) {
1237         out->err(out, "Could not upgrade the current CIB XML");
1238         free_xml(cib_xml_copy);
1239         return rc;
1240     }
1241 
1242     return rc;
1243 }
1244 
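      /* Refresh scheduler data from the given CIB. If simulate is true, also
       * run a simulated transition against a temporary shadow CIB and re-read
       * the resulting state.
       */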
1245 // \return Standard Pacemaker return code
1246 static int
1247 update_dataset(cib_t *cib, pcmk_scheduler_t *scheduler, bool simulate)
1248 {
1249     char *pid = NULL;
1250     char *shadow_file = NULL;
1251     cib_t *shadow_cib = NULL;
1252     int rc = pcmk_rc_ok;
1253 
1254     pcmk__output_t *out = scheduler->priv;
1255 
1256     pe_reset_working_set(scheduler);
1257     pe__set_working_set_flags(scheduler,
1258                               pcmk_sched_no_counts|pcmk_sched_no_compat);
1259     rc = update_scheduler_input_to_cib(out, scheduler, cib);
1260     if (rc != pcmk_rc_ok) {
1261         return rc;
1262     }
1263 
1264     if(simulate) {
1265         bool prev_quiet = false;
1266 
1267         pid = pcmk__getpid_s();
1268         shadow_cib = cib_shadow_new(pid);
1269         shadow_file = get_shadow_file(pid);
1270 
1271         if (shadow_cib == NULL) {
1272             out->err(out, "Could not create shadow cib: '%s'", pid);
1273             rc = ENXIO;
1274             goto done;
1275         }
1276 
1277         rc = write_xml_file(scheduler->input, shadow_file, FALSE);
1278 
1279         if (rc < 0) {
1280             out->err(out, "Could not populate shadow cib: %s (%d)", pcmk_strerror(rc), rc);
1281             goto done;
1282         }
1283 
1284         rc = shadow_cib->cmds->signon(shadow_cib, crm_system_name, cib_command);
1285         rc = pcmk_legacy2rc(rc);
1286 
1287         if (rc != pcmk_rc_ok) {
1288             out->err(out, "Could not connect to shadow cib: %s (%d)", pcmk_rc_str(rc), rc);
1289             goto done;
1290         }
1291 
1292         pcmk__schedule_actions(scheduler->input,
1293                                pcmk_sched_no_counts|pcmk_sched_no_compat,
1294                                scheduler);
1295 
1296         prev_quiet = out->is_quiet(out);
1297         out->quiet = true;
1298         pcmk__simulate_transition(scheduler, shadow_cib, NULL);
1299         out->quiet = prev_quiet;
1300 
1301         rc = update_dataset(shadow_cib, scheduler, false);
1302 
1303     } else {
1304         cluster_status(scheduler);
1305     }
1306 
1307   done:
1308     // Do not free scheduler->input here, we need rsc->xml to be valid later on
1309     cib_delete(shadow_cib);
1310     free(pid);
1311 
1312     if(shadow_file) {
1313         unlink(shadow_file);
1314         free(shadow_file);
1315     }
1316 
1317     return rc;
1318 }
1319 
1320 /*!
1321  * \internal
1322  * \brief Find the maximum stop timeout of a resource and its children (if any)
1323  *
1324  * \param[in,out] rsc  Resource to get timeout for
1325  *
1326  * \return Maximum stop timeout for \p rsc (in milliseconds)
1327  */
1328 static int
1329 max_rsc_stop_timeout(pcmk_resource_t *rsc)
1330 {
1331     long long result_ll;
1332     int max_delay = 0;
1333     xmlNode *config = NULL;
1334     GHashTable *meta = NULL;
1335 
1336     if (rsc == NULL) {
1337         return 0;
1338     }
1339 
1340     // If resource is collective, use maximum of its children's stop timeouts
1341     if (rsc->children != NULL) {
1342         for (GList *iter = rsc->children; iter; iter = iter->next) {
1343             pcmk_resource_t *child = iter->data;
1344             int delay = max_rsc_stop_timeout(child);
1345 
1346             if (delay > max_delay) {
1347                 pe_rsc_trace(rsc,
1348                              "Maximum stop timeout for %s is now %s due to %s",
1349                              rsc->id, pcmk__readable_interval(delay), child->id);
1350                 max_delay = delay;
1351             }
1352         }
1353         return max_delay;
1354     }
1355 
1356     // Get resource's stop action configuration from CIB
1357     config = pcmk__find_action_config(rsc, PCMK_ACTION_STOP, 0, true);
1358 
1359     /* Get configured timeout for stop action (fully evaluated for rules,
1360      * defaults, etc.).
1361      *
1362      * @TODO This currently ignores node (which might matter for rules)
1363      */
1364     meta = pcmk__unpack_action_meta(rsc, NULL, PCMK_ACTION_STOP, 0, config);
1365     if ((pcmk__scan_ll(g_hash_table_lookup(meta, XML_ATTR_TIMEOUT),
1366                        &result_ll, -1LL) == pcmk_rc_ok)
1367         && (result_ll >= 0) && (result_ll <= INT_MAX)) {
1368         max_delay = (int) result_ll;
1369     }
1370     g_hash_table_destroy(meta);
1371 
1372     return max_delay;
1373 }
1374 
1375 /*!
1376  * \internal
1377  * \brief Find a reasonable waiting time for stopping any one resource in a list
1378  *
1379  * \param[in,out] scheduler  Scheduler data
1380  * \param[in]     resources  List of names of resources that will be stopped
1381  *
1382  * \return Rough estimate of a reasonable time to wait (in seconds) to stop any
1383  *         one resource in \p resources
1384  * \note This estimate is very rough, simply the maximum stop timeout of all
1385  *       given resources and their children, plus a small fudge factor. It does
1386  *       not account for children that must be stopped in sequence, action
1387  *       throttling, or any demotions needed. It checks the stop timeout, even
1388  *       if the resources in question are actually being started.
1389  */
1390 static int
1391 wait_time_estimate(pcmk_scheduler_t *scheduler, const GList *resources)
1392 {
1393     int max_delay = 0;
1394 
1395     // Find maximum stop timeout in milliseconds
1396     for (const GList *item = resources; item != NULL; item = item->next) {
1397         pcmk_resource_t *rsc = pe_find_resource(scheduler->resources,
1398                                                 (const char *) item->data);
1399         int delay = max_rsc_stop_timeout(rsc);
1400 
1401         if (delay > max_delay) {
1402             pe_rsc_trace(rsc,
1403                          "Wait time is now %s due to %s",
1404                          pcmk__readable_interval(delay), rsc->id);
1405             max_delay = delay;
1406         }
1407     }
1408 
1409     return (max_delay / 1000) + 5;
1410 }
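     /* Worked example: if the longest stop timeout among the listed resources
      * is 90s (90000ms), the estimate returned above is 90 + 5 = 95 seconds.
      */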
1411 
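     /* True while any resources in delta list \p d still need to start, or
      * while resource \p r is not yet running on host \p h (anywhere if \p h
      * is NULL)
      */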
1412 #define waiting_for_starts(d, r, h) ((d != NULL) || \
1413                                     (!resource_is_running_on((r), (h))))
1414 
1415 /*!
1416  * \internal
1417  * \brief Restart a resource (on a particular host if requested).
1418  *
1419  * \param[in,out] out                 Output object
1420  * \param[in,out] rsc                 The resource to restart
1421  * \param[in]     node                Node to restart resource on (NULL for all)
1422  * \param[in]     move_lifetime       If not NULL, how long constraint should
1423  *                                    remain in effect (as ISO 8601 string)
1424  * \param[in]     timeout_ms          Consider failed if actions do not complete
1425  *                                    in this time (specified in milliseconds,
1426  *                                    but a two-second granularity is actually
1427  *                                    used; if 0, it will be calculated based on
1428  *                                    the resource timeout)
1429  * \param[in,out] cib                 Connection to the CIB manager
1430  * \param[in]     cib_options         Group of enum cib_call_options flags to
1431  *                                    use with CIB calls
1432  * \param[in]     promoted_role_only  If true, limit to promoted instances
1433  * \param[in]     force               If true, apply only to requested instance
1434  *                                    if part of a collective resource
1435  *
1436  * \return Standard Pacemaker return code (exits on certain failures)
1437  */
1438 int
1439 cli_resource_restart(pcmk__output_t *out, pcmk_resource_t *rsc,
1440                      const pcmk_node_t *node, const char *move_lifetime,
1441                      int timeout_ms, cib_t *cib, int cib_options,
1442                      gboolean promoted_role_only, gboolean force)
1443 {
1444     int rc = pcmk_rc_ok;
1445     int lpc = 0;
1446     int before = 0;
1447     int step_timeout_s = 0;
1448     int sleep_interval = 2;
1449     int timeout = timeout_ms / 1000;
1450 
1451     bool stop_via_ban = false;
1452     char *rsc_id = NULL;
1453     char *lookup_id = NULL;
1454     char *orig_target_role = NULL;
1455 
1456     GList *list_delta = NULL;
1457     GList *target_active = NULL;
1458     GList *current_active = NULL;
1459     GList *restart_target_active = NULL;
1460 
1461     pcmk_scheduler_t *scheduler = NULL;
1462     pcmk_resource_t *parent = uber_parent(rsc);
1463 
1464     bool running = false;
1465     const char *id = rsc->clone_name ? rsc->clone_name : rsc->id;
1466     const char *host = node ? node->details->uname : NULL;
1467 
1468     /* If a bundle's implicit or primitive resource is given, operate on the
1469      * bundle itself instead.
1470      */
1471     if (pe_rsc_is_bundled(rsc)) {
1472         rsc = parent->parent;
1473     }
1474 
1475     running = resource_is_running_on(rsc, host);
1476 
1477     if (pe_rsc_is_clone(parent) && !running) {
1478         if (pe_rsc_is_unique_clone(parent)) {
1479             lookup_id = strdup(rsc->id);
1480         } else {
1481             lookup_id = clone_strip(rsc->id);
1482         }
1483 
1484         rsc = parent->fns->find_rsc(parent, lookup_id, node,
1485                                     pcmk_rsc_match_basename
1486                                     |pcmk_rsc_match_current_node);
1487         free(lookup_id);
1488         running = resource_is_running_on(rsc, host);
1489     }
1490 
1491     if (!running) {
1492         if (host) {
1493             out->err(out, "%s is not running on %s and so cannot be restarted", id, host);
1494         } else {
1495             out->err(out, "%s is not running anywhere and so cannot be restarted", id);
1496         }
1497         return ENXIO;
1498     }
1499 
1500     if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
1501         out->err(out, "Unmanaged resources cannot be restarted.");
1502         return EAGAIN;
1503     }
1504 
1505     rsc_id = strdup(rsc->id);
1506 
1507     if (pe_rsc_is_unique_clone(parent)) {
1508         lookup_id = strdup(rsc->id);
1509     } else {
1510         lookup_id = clone_strip(rsc->id);
1511     }
1512 
1513     if (host) {
1514         if (pe_rsc_is_clone(rsc) || pe_bundle_replicas(rsc)) {
1515             stop_via_ban = true;
1516         } else if (pe_rsc_is_clone(parent)) {
1517             stop_via_ban = true;
1518             free(lookup_id);
1519             lookup_id = strdup(parent->id);
1520         }
1521     }
1522 
1523     /*
1524       Procedure:
1525       - Grab the full CIB and determine the originally active resources.
1526       - Disable (via target-role) or ban the resource.
1527       - Poll the CIB and watch for the affected resources to stop.
1528         Without --timeout, calculate the stop timeout for each step and wait for that.
1529         If we hit --timeout or the service timeout, re-enable or un-ban, report failure, and indicate which resources we couldn't take down.
1530       - If everything stopped, re-enable or un-ban.
1531       - Poll the CIB and watch for the affected resources to start again.
1532         Without --timeout, calculate the start timeout for each step and wait for that.
1533         If we hit --timeout or the service timeout, report a (different) failure and indicate which resources we couldn't bring back up.
1534       - Report success.
1535 
1536       Possible optimizations:
1537       - Use constraints to determine an ordered list of affected resources.
1538       - Allow a --no-deps option (a.k.a. --force-restart).
1539     */
1540 
1541     scheduler = pe_new_working_set();
1542     if (scheduler == NULL) {
1543         rc = errno;
1544         out->err(out, "Could not allocate scheduler data: %s", pcmk_rc_str(rc));
1545         goto done;
1546     }
1547 
1548     scheduler->priv = out;
1549     rc = update_dataset(cib, scheduler, false);
1550 
1551     if(rc != pcmk_rc_ok) {
1552         out->err(out, "Could not get new resource list: %s (%d)", pcmk_rc_str(rc), rc);
1553         goto done;
1554     }
1555 
1556     restart_target_active = get_active_resources(host, scheduler->resources);
1557     current_active = get_active_resources(host, scheduler->resources);
1558 
1559     dump_list(current_active, "Origin");
1560 
1561     if (stop_via_ban) {
1562         /* Stop the clone or bundle instance by banning it from the host */
1563         out->quiet = true;
1564         rc = cli_resource_ban(out, lookup_id, host, move_lifetime, cib,
1565                               cib_options, promoted_role_only,
1566                               PCMK__ROLE_PROMOTED);
1567     } else {
1568         /* Stop the resource by setting target-role to Stopped.
1569          * Remember any existing target-role so we can restore it later
1570          * (though it only makes any difference if it's Unpromoted).
1571          */
1572 
1573         find_resource_attr(out, cib, XML_NVPAIR_ATTR_VALUE, lookup_id, NULL, NULL,
1574                            NULL, XML_RSC_ATTR_TARGET_ROLE, &orig_target_role);
1575         rc = cli_resource_update_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS,
1576                                            NULL, XML_RSC_ATTR_TARGET_ROLE,
1577                                            PCMK_ACTION_STOPPED, FALSE, cib,
1578                                            cib_options, force);
1579     }
1580     if(rc != pcmk_rc_ok) {
1581         out->err(out, "Could not set target-role for %s: %s (%d)", rsc_id, pcmk_rc_str(rc), rc);
1582         if (current_active != NULL) {
1583             g_list_free_full(current_active, free);
1584             current_active = NULL;
1585         }
1586         if (restart_target_active != NULL) {
1587             g_list_free_full(restart_target_active, free);
1588             restart_target_active = NULL;
1589         }
1590         goto done;
1591     }
1592 
1593     rc = update_dataset(cib, scheduler, true);
1594     if(rc != pcmk_rc_ok) {
1595         out->err(out, "Could not determine which resources would be stopped");
1596         goto failure;
1597     }
1598 
1599     target_active = get_active_resources(host, scheduler->resources);
1600     dump_list(target_active, "Target");
1601 
1602     list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp);
1603     out->info(out, "Waiting for %d resources to stop:", g_list_length(list_delta));
1604     display_list(out, list_delta, " * ");
1605 
1606     step_timeout_s = timeout / sleep_interval;
1607     while (list_delta != NULL) {
1608         before = g_list_length(list_delta);
1609         if(timeout_ms == 0) {
1610             step_timeout_s = wait_time_estimate(scheduler, list_delta)
1611                              / sleep_interval;
1612         }
1613 
1614         /* We probably don't need the entire step timeout */
1615         for(lpc = 0; (lpc < step_timeout_s) && (list_delta != NULL); lpc++) {
1616             sleep(sleep_interval);
1617             if(timeout) {
1618                 timeout -= sleep_interval;
1619                 crm_trace("%ds remaining", timeout);
1620             }
1621             rc = update_dataset(cib, scheduler, FALSE);
1622             if(rc != pcmk_rc_ok) {
1623                 out->err(out, "Could not determine which resources were stopped");
1624                 goto failure;
1625             }
1626 
1627             if (current_active != NULL) {
1628                 g_list_free_full(current_active, free);
1629             }
1630             current_active = get_active_resources(host, scheduler->resources);
1631 
1632             g_list_free(list_delta);
1633             list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp);
1634 
1635             dump_list(current_active, "Current");
1636             dump_list(list_delta, "Delta");
1637         }
1638 
1639         crm_trace("%d (was %d) resources remaining", g_list_length(list_delta), before);
1640         if(before == g_list_length(list_delta)) {
1641             /* Aborted during the stop phase; print the contents of list_delta */
1642             out->err(out, "Could not complete shutdown of %s, %d resources remaining", rsc_id, g_list_length(list_delta));
1643             display_list(out, list_delta, " * ");
1644             rc = ETIME;
1645             goto failure;
1646         }
1647 
1648     }
1649 
1650     if (stop_via_ban) {
1651         rc = cli_resource_clear(lookup_id, host, NULL, cib, cib_options, true, force);
1652 
1653     } else if (orig_target_role) {
1654         rc = cli_resource_update_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS,
1655                                            NULL, XML_RSC_ATTR_TARGET_ROLE,
1656                                            orig_target_role, FALSE, cib,
1657                                            cib_options, force);
1658         free(orig_target_role);
1659         orig_target_role = NULL;
1660     } else {
1661         rc = cli_resource_delete_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS,
1662                                            NULL, XML_RSC_ATTR_TARGET_ROLE, cib,
1663                                            cib_options, force);
1664     }
1665 
1666     if(rc != pcmk_rc_ok) {
1667         out->err(out, "Could not unset target-role for %s: %s (%d)", rsc_id, pcmk_rc_str(rc), rc);
1668         goto done;
1669     }
1670 
1671     if (target_active != NULL) {
1672         g_list_free_full(target_active, free);
1673     }
1674     target_active = restart_target_active;
1675 
1676     list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp);
1677     out->info(out, "Waiting for %d resources to start again:", g_list_length(list_delta));
1678     display_list(out, list_delta, " * ");
1679 
1680     step_timeout_s = timeout / sleep_interval;
1681     while (waiting_for_starts(list_delta, rsc, host)) {
1682         before = g_list_length(list_delta);
1683         if(timeout_ms == 0) {
1684             step_timeout_s = wait_time_estimate(scheduler, list_delta)
1685                              / sleep_interval;
1686         }
1687 
1688         /* We probably don't need the entire step timeout */
1689         for (lpc = 0; (lpc < step_timeout_s) && waiting_for_starts(list_delta, rsc, host); lpc++) {
1690 
1691             sleep(sleep_interval);
1692             if(timeout) {
1693                 timeout -= sleep_interval;
1694                 crm_trace("%ds remaining", timeout);
1695             }
1696 
1697             rc = update_dataset(cib, scheduler, false);
1698             if(rc != pcmk_rc_ok) {
1699                 out->err(out, "Could not determine which resources were started");
1700                 goto failure;
1701             }
1702 
1703             /* It's OK if dependent resources moved to a different node,
1704              * so we check active resources on all nodes.
1705              */
1706             if (current_active != NULL) {
1707                 g_list_free_full(current_active, free);
1708             }
1709             current_active = get_active_resources(NULL, scheduler->resources);
1710 
1711             g_list_free(list_delta);
1712             list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp);
1713             dump_list(current_active, "Current");
1714             dump_list(list_delta, "Delta");
1715         }
1716 
1717         if(before == g_list_length(list_delta)) {
1718             /* Aborted during the start phase; print the contents of list_delta */
1719             out->err(out, "Could not complete restart of %s, %d resources remaining", rsc_id, g_list_length(list_delta));
1720             display_list(out, list_delta, " * ");
1721             rc = ETIME;
1722             goto failure;
1723         }
1724 
1725     }
1726 
1727     rc = pcmk_rc_ok;
1728     goto done;
1729 
1730   failure:
1731     if (stop_via_ban) {
1732         cli_resource_clear(lookup_id, host, NULL, cib, cib_options, true, force);
1733     } else if (orig_target_role) {
1734         cli_resource_update_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS, NULL,
1735                                       XML_RSC_ATTR_TARGET_ROLE, orig_target_role,
1736                                       FALSE, cib, cib_options, force);
1737         free(orig_target_role);
1738     } else {
1739         cli_resource_delete_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS,
1740                                       NULL, XML_RSC_ATTR_TARGET_ROLE, cib,
1741                                       cib_options, force);
1742     }
1743 
1744 done:
1745     if (list_delta != NULL) {
1746         g_list_free(list_delta);
1747     }
1748     if (current_active != NULL) {
1749         g_list_free_full(current_active, free);
1750     }
1751     if (target_active != NULL && (target_active != restart_target_active)) {
1752         g_list_free_full(target_active, free);
1753     }
1754     if (restart_target_active != NULL) {
1755         g_list_free_full(restart_target_active, free);
1756     }
1757     free(rsc_id);
1758     free(lookup_id);
1759     pe_free_working_set(scheduler);
1760     return rc;
1761 }
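     /* A hypothetical invocation (argument values are illustrative only):
      *
      *     rc = cli_resource_restart(out, rsc, NULL, NULL, 0, cib,
      *                               cib_sync_call, FALSE, FALSE);
      *
      * would restart rsc wherever it is active, deriving step timeouts from
      * the resources' own stop timeouts because timeout_ms is 0.
      */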
1762 
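     /*!
      * \internal
      * \brief Check whether a scheduled action is still pending
      *
      * Optional actions, pseudo-actions, unrunnable actions, and notifications
      * are not considered pending.
      *
      * \param[in] action  Action to check
      *
      * \return true if \p action is pending, otherwise false
      */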
1763 static inline bool
1764 action_is_pending(const pcmk_action_t *action)
1765 {
1766     if (pcmk_any_flags_set(action->flags,
1767                            pcmk_action_optional|pcmk_action_pseudo)
1768         || !pcmk_is_set(action->flags, pcmk_action_runnable)
1769         || pcmk__str_eq(PCMK_ACTION_NOTIFY, action->task, pcmk__str_casei)) {
1770         return false;
1771     }
1772     return true;
1773 }
1774 
1775 /*!
1776  * \internal
1777  * \brief Check whether any actions in a list are pending
1778  *
1779  * \param[in] actions   List of actions to check
1780  *
1781  * \return true if any actions in the list are pending, otherwise false
1782  */
1783 static bool
1784 actions_are_pending(const GList *actions)
1785 {
1786     for (const GList *action = actions; action != NULL; action = action->next) {
1787         const pcmk_action_t *a = (const pcmk_action_t *) action->data;
1788 
1789         if (action_is_pending(a)) {
1790             crm_notice("Waiting for %s (flags=%#.8x)", a->uuid, a->flags);
1791             return true;
1792         }
1793     }
1794     return false;
1795 }
1796 
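     /*!
      * \internal
      * \brief List all pending actions via the output object
      *
      * \param[in,out] out      Output object
      * \param[in]     actions  List of actions to scan for pending ones
      */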
1797 static void
1798 print_pending_actions(pcmk__output_t *out, GList *actions)
1799 {
1800     GList *action;
1801 
1802     out->info(out, "Pending actions:");
1803     for (action = actions; action != NULL; action = action->next) {
1804         pcmk_action_t *a = (pcmk_action_t *) action->data;
1805 
1806         if (!action_is_pending(a)) {
1807             continue;
1808         }
1809 
1810         if (a->node) {
1811             out->info(out, "\tAction %d: %s\ton %s",
1812                       a->id, a->uuid, pe__node_name(a->node));
1813         } else {
1814             out->info(out, "\tAction %d: %s", a->id, a->uuid);
1815         }
1816     }
1817 }
1818 
1819 /* For --wait, timeout (in seconds) to use if caller doesn't specify one */
1820 #define WAIT_DEFAULT_TIMEOUT_S (60 * 60)
1821 
1822 /* For --wait, how long to sleep between cluster state checks */
1823 #define WAIT_SLEEP_S (2)
1824 
1825 /*!
1826  * \internal
1827  * \brief Wait until all pending cluster actions are complete
1828  *
1829  * This waits until either the CIB's transition graph is idle or a timeout is
1830  * reached.
1831  *
1832  * \param[in,out] out          Output object
1833  * \param[in]     timeout_ms   Consider failed if actions do not complete in
1834  *                             this time (specified in milliseconds, but
1835  *                             one-second granularity is actually used; if 0, a
1836  *                             default will be used)
1837  * \param[in,out] cib          Connection to the CIB manager
1838  *
1839  * \return Standard Pacemaker return code
1840  */
1841 int
1842 wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib)
1843 {
1844     pcmk_scheduler_t *scheduler = NULL;
1845     xmlXPathObjectPtr search;
1846     int rc = pcmk_rc_ok;
1847     bool pending_unknown_state_resources;
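         // Convert to whole seconds, rounding up; 0 means use the default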
1848     int timeout_s = timeout_ms? ((timeout_ms + 999) / 1000) : WAIT_DEFAULT_TIMEOUT_S;
1849     time_t expire_time = time(NULL) + timeout_s;
1850     time_t time_diff;
1851     bool printed_version_warning = out->is_quiet(out); // i.e. don't print if quiet
1852 
1853     scheduler = pe_new_working_set();
1854     if (scheduler == NULL) {
1855         return ENOMEM;
1856     }
1857 
1858     do {
1859         /* Abort if timeout is reached */
1860         time_diff = expire_time - time(NULL);
1861         if (time_diff > 0) {
1862             crm_info("Waiting up to %lld seconds for cluster actions to complete", (long long) time_diff);
1863         } else {
1864             print_pending_actions(out, scheduler->actions);
1865             pe_free_working_set(scheduler);
1866             return ETIME;
1867         }
1868         if (rc == pcmk_rc_ok) { /* this avoids sleep on first loop iteration */
1869             sleep(WAIT_SLEEP_S);
1870         }
1871 
1872         /* Get latest transition graph */
1873         pe_reset_working_set(scheduler);
1874         rc = update_scheduler_input_to_cib(out, scheduler, cib);
1875         if (rc != pcmk_rc_ok) {
1876             pe_free_working_set(scheduler);
1877             return rc;
1878         }
1879         pcmk__schedule_actions(scheduler->input,
1880                                pcmk_sched_no_counts|pcmk_sched_no_compat,
1881                                scheduler);
1882 
1883         if (!printed_version_warning) {
1884             /* If the DC has a different version than the local node, the two
1885              * could come to different conclusions about what actions need to be
1886              * done. Warn the user in this case.
1887              *
1888              * @TODO A possible long-term solution would be to reimplement the
1889              * wait as a new controller operation that would be forwarded to the
1890              * DC. However, that would have potential problems of its own.
1891              */
1892             const char *dc_version = g_hash_table_lookup(scheduler->config_hash,
1893                                                          "dc-version");
1894 
1895             if (!pcmk__str_eq(dc_version, PACEMAKER_VERSION "-" BUILD_VERSION, pcmk__str_casei)) {
1896                 out->info(out, "warning: wait option may not work properly in "
1897                           "mixed-version cluster");
1898                 printed_version_warning = true;
1899             }
1900         }
1901 
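             /* Also treat the transition as pending while any resource history
              * entry still has the "unknown" result (rc-code 193), i.e. its
              * state has not been determined yet
              */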
1902         search = xpath_search(scheduler->input, "/cib/status/node_state/lrm/lrm_resources/lrm_resource/"
1903                                                 XML_LRM_TAG_RSC_OP "[@" XML_LRM_ATTR_RC "='193']");
1904         pending_unknown_state_resources = (numXpathResults(search) > 0);
1905         freeXpathObject(search);
1906     } while (actions_are_pending(scheduler->actions) || pending_unknown_state_resources);
1907 
1908     pe_free_working_set(scheduler);
1909     return rc;
1910 }
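     /* Hypothetical use, waiting up to five minutes for the cluster to settle:
      *
      *     rc = wait_till_stable(out, 300000, cib);    // 300000ms == 5 minutes
      */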
1911 
1912 static const char *
1913 get_action(const char *rsc_action) {
1914     const char *action = NULL;
1915 
1916     if (pcmk__str_eq(rsc_action, "validate", pcmk__str_casei)) {
1917         action = PCMK_ACTION_VALIDATE_ALL;
1918 
1919     } else if (pcmk__str_eq(rsc_action, "force-check", pcmk__str_casei)) {
1920         action = PCMK_ACTION_MONITOR;
1921 
1922     } else if (pcmk__strcase_any_of(rsc_action, "force-start", "force-stop",
1923                                     "force-demote", "force-promote", NULL)) {
1924         action = rsc_action+6;
1925     } else {
1926         action = rsc_action;
1927     }
1928 
1929     return action;
1930 }
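     /* For example, get_action("force-stop") yields "stop",
      * get_action("force-check") yields PCMK_ACTION_MONITOR, and
      * get_action("validate") yields PCMK_ACTION_VALIDATE_ALL; any other
      * string is passed through unchanged.
      */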
1931 
1932 /*!
1933  * \brief Set up environment variables as expected by resource agents
1934  *
1935  * When the cluster executes resource agents, it adds certain environment
1936  * variables (directly or via resource meta-attributes) expected by some
1937  * resource agents. Add the essential ones that many resource agents expect, so
1938  * the behavior is the same for command-line execution.
1939  *
1940  * \param[in,out] params       Resource parameters that will be passed to agent
1941  * \param[in]     timeout_ms   Action timeout (in milliseconds)
1942  * \param[in]     check_level  OCF check level
1943  * \param[in]     verbosity    Verbosity level
1944  */
1945 static void
1946 set_agent_environment(GHashTable *params, int timeout_ms, int check_level,
1947                       int verbosity)
1948 {
1949     g_hash_table_insert(params, strdup("CRM_meta_timeout"),
1950                         crm_strdup_printf("%d", timeout_ms));
1951 
1952     g_hash_table_insert(params, strdup(XML_ATTR_CRM_VERSION),
1953                         strdup(CRM_FEATURE_SET));
1954 
1955     if (check_level >= 0) {
1956         char *level = crm_strdup_printf("%d", check_level);
1957 
1958         setenv("OCF_CHECK_LEVEL", level, 1);
1959         free(level);
1960     }
1961 
1962     pcmk__set_env_option(PCMK__ENV_DEBUG, ((verbosity > 0)? "1" : "0"), true);
1963     if (verbosity > 1) {
1964         setenv("OCF_TRACE_RA", "1", 1);
1965     }
1966 
1967     /* A resource agent using the standard ocf-shellfuncs library will not print
1968      * messages to stderr if it doesn't have a controlling terminal (e.g. if
1969      * crm_resource is called via script or ssh). This forces it to do so.
1970      */
1971     setenv("OCF_TRACE_FILE", "/dev/stderr", 0);
1972 }
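     /* A minimal sketch of typical use (the parameter name and value are
      * illustrative only): build the agent's parameter table, then add the
      * cluster-style environment before executing the action:
      *
      *     GHashTable *params = g_hash_table_new_full(g_str_hash, g_str_equal,
      *                                                free, free);
      *
      *     g_hash_table_insert(params, strdup("ip"), strdup("192.0.2.10"));
      *     set_agent_environment(params, 20000, 0, 1); // 20s timeout, verbose
      */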
1973 
1974 /*!
1975  * \internal
1976  * \brief Apply command-line overrides to resource parameters
1977  *
1978  * \param[in,out] params     Parameters to be passed to agent
1979  * \param[in]     overrides  Parameters to override (or NULL if none)
1980  */
1981 static void
1982 apply_overrides(GHashTable *params, GHashTable *overrides)
1983 {
1984     if (overrides != NULL) {
1985         GHashTableIter iter;
1986         char *name = NULL;
1987         char *value = NULL;
1988 
1989         g_hash_table_iter_init(&iter, overrides);
1990         while (g_hash_table_iter_next(&iter, (gpointer *) &name,
1991                                       (gpointer *) &value)) {
1992             g_hash_table_replace(params, strdup(name), strdup(value));
1993         }
1994     }
1995 }
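     /* For instance, a hypothetical override of "ip" => "192.0.2.11" replaces
      * (or adds) that entry in params while leaving every other parameter
      * untouched.
      */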
1996 
1997 crm_exit_t
1998 cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name,
1999                                  const char *rsc_class, const char *rsc_prov,
2000                                  const char *rsc_type, const char *rsc_action,
2001                                  GHashTable *params, GHashTable *override_hash,
2002                                  int timeout_ms, int resource_verbose, gboolean force,
2003                                  int check_level)
2004 {
2005     const char *class = rsc_class;
2006     const char *action = get_action(rsc_action);
2007     crm_exit_t exit_code = CRM_EX_OK;
2008     svc_action_t *op = NULL;
2009 
2010     // If no timeout was provided, use the same default as the cluster
2011     if (timeout_ms == 0) {
2012         timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
2013     }
2014 
2015     set_agent_environment(params, timeout_ms, check_level, resource_verbose);
2016     apply_overrides(params, override_hash);
2017 
2018     op = services__create_resource_action(rsc_name? rsc_name : "test",
2019                                           rsc_class, rsc_prov, rsc_type, action,
2020                                           0, timeout_ms, params, 0);
2021     if (op == NULL) {
2022         out->err(out, "Could not execute %s using %s%s%s:%s: %s",
2023                  action, rsc_class, (rsc_prov? ":" : ""),
2024                  (rsc_prov? rsc_prov : ""), rsc_type, strerror(ENOMEM));
2025         g_hash_table_destroy(params);
2026         return CRM_EX_OSERR;
2027     }
2028 
2029     if (pcmk__str_eq(rsc_class, PCMK_RESOURCE_CLASS_SERVICE, pcmk__str_casei)) {
2030         class = resources_find_service_class(rsc_type);
2031     }
2032     if (!pcmk__strcase_any_of(class, PCMK_RESOURCE_CLASS_OCF,
2033                               PCMK_RESOURCE_CLASS_LSB, NULL)) {
2034         services__format_result(op, CRM_EX_UNIMPLEMENT_FEATURE, PCMK_EXEC_ERROR,
2035                                 "Manual execution of the %s standard is "
2036                                 "unsupported", pcmk__s(class, "unspecified"));
2037     }
2038 
2039     if (op->rc != PCMK_OCF_UNKNOWN) {
2040         exit_code = op->rc;
2041         goto done;
2042     }
2043 
2044     services_action_sync(op);
2045 
2046     // Map results to OCF codes for consistent reporting to user
2047     {
2048         enum ocf_exitcode ocf_code = services_result2ocf(class, action, op->rc);
2049 
2050         // Cast variable instead of function return to keep compilers happy
2051         exit_code = (crm_exit_t) ocf_code;
2052     }
2053 
2054 done:
2055     out->message(out, "resource-agent-action", resource_verbose, rsc_class,
2056                  rsc_prov, rsc_type, rsc_name, rsc_action, override_hash,
2057                  exit_code, op->status, services__exit_reason(op),
2058                  op->stdout_data, op->stderr_data);
2059     services_action_free(op);
2060     return exit_code;
2061 }
2062 
2063 crm_exit_t
2064 cli_resource_execute(pcmk_resource_t *rsc, const char *requested_name,
2065                      const char *rsc_action, GHashTable *override_hash,
2066                      int timeout_ms, cib_t *cib, pcmk_scheduler_t *scheduler,
2067                      int resource_verbose, gboolean force, int check_level)
2068 {
2069     pcmk__output_t *out = scheduler->priv;
2070     crm_exit_t exit_code = CRM_EX_OK;
2071     const char *rid = NULL;
2072     const char *rtype = NULL;
2073     const char *rprov = NULL;
2074     const char *rclass = NULL;
2075     GHashTable *params = NULL;
2076 
2077     if (pcmk__strcase_any_of(rsc_action, "force-start", "force-demote",
2078                                     "force-promote", NULL)) {
2079         if(pe_rsc_is_clone(rsc)) {
2080             GList *nodes = cli_resource_search(rsc, requested_name, scheduler);
2081             if(nodes != NULL && force == FALSE) {
2082                 out->err(out, "It is not safe to %s %s here: the cluster claims it is already active",
2083                          rsc_action, rsc->id);
2084                 out->err(out, "Try setting target-role=Stopped first or specifying "
2085                          "the force option");
2086                 return CRM_EX_UNSAFE;
2087             }
2088 
2089             g_list_free_full(nodes, free);
2090         }
2091     }
2092 
2093     if(pe_rsc_is_clone(rsc)) {
2094         /* Grab the first child resource in the hope it's not a group */
2095         rsc = rsc->children->data;
2096     }
2097 
2098     if (rsc->variant == pcmk_rsc_variant_group) {
2099         out->err(out, "Sorry, the %s option doesn't support group resources", rsc_action);
2100         return CRM_EX_UNIMPLEMENT_FEATURE;
2101     } else if (pe_rsc_is_bundled(rsc)) {
2102         out->err(out, "Sorry, the %s option doesn't support bundled resources", rsc_action);
2103         return CRM_EX_UNIMPLEMENT_FEATURE;
2104     }
2105 
2106     rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
2107     rprov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
2108     rtype = crm_element_value(rsc->xml, XML_ATTR_TYPE);
2109 
2110     params = generate_resource_params(rsc, NULL /* @TODO use local node */,
2111                                       scheduler);
2112 
2113     if (timeout_ms == 0) {
2114         timeout_ms = pe_get_configured_timeout(rsc, get_action(rsc_action),
2115                                                scheduler);
2116     }
2117 
2118     rid = pe_rsc_is_anon_clone(rsc->parent)? requested_name : rsc->id;
2119 
2120     exit_code = cli_resource_execute_from_params(out, rid, rclass, rprov, rtype, rsc_action,
2121                                                  params, override_hash, timeout_ms,
2122                                                  resource_verbose, force, check_level);
2123     return exit_code;
2124 }
2125 
2126 // \return Standard Pacemaker return code
2127 int
2128 cli_resource_move(const pcmk_resource_t *rsc, const char *rsc_id,
2129                   const char *host_name, const char *move_lifetime, cib_t *cib,
2130                   int cib_options, pcmk_scheduler_t *scheduler,
2131                   gboolean promoted_role_only, gboolean force)
2132 {
2133     pcmk__output_t *out = scheduler->priv;
2134     int rc = pcmk_rc_ok;
2135     unsigned int count = 0;
2136     pcmk_node_t *current = NULL;
2137     pcmk_node_t *dest = pe_find_node(scheduler->nodes, host_name);
2138     bool cur_is_dest = false;
2139 
2140     if (dest == NULL) {
2141         return pcmk_rc_node_unknown;
2142     }
2143 
2144     if (promoted_role_only
2145         && !pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
2146 
2147         const pcmk_resource_t *p = pe__const_top_resource(rsc, false);
2148 
2149         if (pcmk_is_set(p->flags, pcmk_rsc_promotable)) {
2150             out->info(out, "Using parent '%s' for move instead of '%s'.", rsc->id, rsc_id);
2151             rsc_id = p->id;
2152             rsc = p;
2153 
2154         } else {
2155             out->info(out, "Ignoring --promoted option: %s is not promotable",
2156                       rsc_id);
2157             promoted_role_only = FALSE;
2158         }
2159     }
2160 
2161     current = pe__find_active_requires(rsc, &count);
2162 
2163     if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
2164         unsigned int promoted_count = 0;
2165         pcmk_node_t *promoted_node = NULL;
2166 
2167         for (const GList *iter = rsc->children; iter; iter = iter->next) {
2168             const pcmk_resource_t *child = (const pcmk_resource_t *) iter->data;
2169             enum rsc_role_e child_role = child->fns->state(child, TRUE);
2170 
2171             if (child_role == pcmk_role_promoted) {
2172                 rsc = child;
2173                 promoted_node = pe__current_node(child);
2174                 promoted_count++;
2175             }
2176         }
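             /* If a promoted instance was found (or only promoted instances
              * were requested), base the move on the promoted instances instead
              */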
2177         if (promoted_role_only || (promoted_count != 0)) {
2178             count = promoted_count;
2179             current = promoted_node;
2180         }
2181 
2182     }
2183 
2184     if (count > 1) {
2185         if (pe_rsc_is_clone(rsc)) {
2186             current = NULL;
2187         } else {
2188             return pcmk_rc_multiple;
2189         }
2190     }
2191 
2192     if (current && (current->details == dest->details)) {
2193         cur_is_dest = true;
2194         if (force) {
2195             crm_info("%s is already %s on %s, reinforcing placement with location constraint.",
2196                      rsc_id, promoted_role_only?"promoted":"active",
2197                      pe__node_name(dest));
2198         } else {
2199             return pcmk_rc_already;
2200         }
2201     }
2202 
2203     /* Clear any previous prefer constraints across all nodes. */
2204     cli_resource_clear(rsc_id, NULL, scheduler->nodes, cib, cib_options, false,
2205                        force);
2206 
2207     /* Clear any previous ban constraints on 'dest'. */
2208     cli_resource_clear(rsc_id, dest->details->uname, scheduler->nodes, cib,
2209                        cib_options, TRUE, force);
2210 
2211     /* Record an explicit preference for 'dest' */
2212     rc = cli_resource_prefer(out, rsc_id, dest->details->uname, move_lifetime,
2213                              cib, cib_options, promoted_role_only,
2214                              PCMK__ROLE_PROMOTED);
2215 
2216     crm_trace("%s%s now prefers %s%s",
2217               rsc->id, (promoted_role_only? " (promoted)" : ""),
2218               pe__node_name(dest), force?"(forced)":"");
2219 
2220     /* Only ban the previous location if the current location differs from the
2221      * destination. It is possible to use -M to enforce a location without
2222      * regard to where the resource is currently located. */
2223     if (force && !cur_is_dest) {
2224         /* Ban the original location if possible */
2225         if(current) {
2226             (void)cli_resource_ban(out, rsc_id, current->details->uname, move_lifetime,
2227                                    cib, cib_options, promoted_role_only,
2228                                    PCMK__ROLE_PROMOTED);
2229         } else if(count > 1) {
2230             out->info(out, "Resource '%s' is currently %s in %d locations. "
2231                       "One may now move to %s",
2232                       rsc_id, (promoted_role_only? "promoted" : "active"),
2233                       count, pe__node_name(dest));
2234             out->info(out, "To prevent '%s' from being %s at a specific location, "
2235                       "specify a node.",
2236                       rsc_id, (promoted_role_only? "promoted" : "active"));
2237 
2238         } else {
2239             crm_trace("Not banning %s from its current location: not active", rsc_id);
2240         }
2241     }
2242 
2243     return rc;
2244 }
