pacemaker 2.1.7
Scalable High-Availability cluster resource manager
pcmk_scheduler.c
/*
 * Copyright 2004-2023 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>

#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>

#include <glib.h>

#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"

CRM_TRACE_INIT_DATA(pacemaker);

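/*!
 * \internal
 * \brief Do deferred action checks after assignment
 *
 * When unpacking the resource history, the scheduler checks for resource
 * configurations that have changed since an action was run. However, at that
 * time, some resources don't have their final parameter information yet, so
 * those checks are deferred to a list. This function processes one entry in
 * that list.
 *
 * \param[in,out] rsc     Resource that action history is for
 * \param[in,out] node    Node that action history is for
 * \param[in]     rsc_op  Action history entry
 * \param[in]     check   Type of deferred check to do
 */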
static void
check_params(pcmk_resource_t *rsc, pcmk_node_t *node, const xmlNode *rsc_op,
             enum pcmk__check_parameters check)
{
    const char *reason = NULL;
    op_digest_cache_t *digest_data = NULL;

    switch (check) {
        case pcmk__check_active:
            if (pcmk__check_action_config(rsc, node, rsc_op)
                && pe_get_failcount(node, rsc, NULL, pcmk__fc_effective,
                                    NULL)) {
                reason = "action definition changed";
            }
            break;

        case pcmk__check_last_failure:
            digest_data = rsc_action_digest_cmp(rsc, rsc_op, node,
                                                rsc->cluster);
            switch (digest_data->rc) {
                case pcmk__digest_unknown:
                    crm_trace("Resource %s history entry %s on %s has "
                              "no digest to compare",
                              rsc->id, ID(rsc_op), node->details->id);
                    break;
                case pcmk__digest_match:
                    break;
                default:
                    reason = "resource parameters have changed";
                    break;
            }
            break;
    }
    if (reason != NULL) {
        pe__clear_failcount(rsc, node, reason, rsc->cluster);
    }
}

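/*!
 * \internal
 * \brief Check whether a resource has a fail-count clearing action scheduled
 *        on a node
 *
 * \param[in] node  Node to check
 * \param[in] rsc   Resource to check
 *
 * \return true if \p rsc has fail-count clearing scheduled on \p node,
 *         otherwise false
 */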
static bool
failcount_clear_action_exists(const pcmk_node_t *node,
                              const pcmk_resource_t *rsc)
{
    GList *list = pe__resource_actions(rsc, node, PCMK_ACTION_CLEAR_FAILCOUNT,
                                       TRUE);

    if (list != NULL) {
        g_list_free(list);
        return true;
    }
    return false;
}

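/*!
 * \internal
 * \brief Ban a resource from a node if it reached its failure threshold there
 *
 * \param[in,out] data       Resource to check failure threshold for
 * \param[in]     user_data  Node to check resource on
 */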
static void
check_failure_threshold(gpointer data, gpointer user_data)
{
    pcmk_resource_t *rsc = data;
    const pcmk_node_t *node = user_data;

    // If this is a collective resource, apply recursively to children instead
    if (rsc->children != NULL) {
        g_list_foreach(rsc->children, check_failure_threshold, user_data);
        return;
    }

    if (!failcount_clear_action_exists(node, rsc)) {
        /* Don't force the resource away from this node due to a failcount
         * that's going to be cleared.
         *
         * @TODO Failcount clearing can be scheduled in
         * pcmk__handle_rsc_config_changes() via process_rsc_history(), or in
         * schedule_resource_actions() via check_params(). This runs well before
         * then, so it cannot detect those, meaning we might check the migration
         * threshold when we shouldn't. Worst case, we stop or move the
         * resource, then move it back in the next transition.
         */
        pcmk_resource_t *failed = NULL;

        if (pcmk__threshold_reached(rsc, node, &failed)) {
            resource_location(failed, node, -INFINITY, "__fail_limit__",
                              rsc->cluster);
        }
    }
}

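/*!
 * \internal
 * \brief If a resource has exclusive discovery, ban a node if not allowed
 *
 * Location constraints have a resource-discovery option that allows users to
 * specify where probes are done for the affected resource. If this is set to
 * exclusive, probes will only be done on nodes listed in exclusive constraints.
 * This function bans the resource from the node if the node is not listed.
 *
 * \param[in,out] data       Resource to check
 * \param[in]     user_data  Node to check resource on
 */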
static void
apply_exclusive_discovery(gpointer data, gpointer user_data)
{
    pcmk_resource_t *rsc = data;
    const pcmk_node_t *node = user_data;

    if (rsc->exclusive_discover
        || pe__const_top_resource(rsc, false)->exclusive_discover) {
        pcmk_node_t *match = NULL;

        // If this is a collective resource, apply recursively to children
        g_list_foreach(rsc->children, apply_exclusive_discovery, user_data);

        match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
        if ((match != NULL)
            && (match->rsc_discover_mode != pcmk_probe_exclusive)) {
            match->weight = -INFINITY;
        }
    }
}

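/*!
 * \internal
 * \brief Apply stickiness to a resource if appropriate
 *
 * \param[in,out] data       Resource to check for stickiness
 * \param[in]     user_data  Ignored
 */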
static void
apply_stickiness(gpointer data, gpointer user_data)
{
    pcmk_resource_t *rsc = data;
    pcmk_node_t *node = NULL;

    // If this is a collective resource, apply recursively to children instead
    if (rsc->children != NULL) {
        g_list_foreach(rsc->children, apply_stickiness, NULL);
        return;
    }

    /* A resource is sticky if it is managed, has stickiness configured, and is
     * active on a single node.
     */
    if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)
        || (rsc->stickiness < 1) || !pcmk__list_of_1(rsc->running_on)) {
        return;
    }

    node = rsc->running_on->data;

    /* In a symmetric cluster, stickiness can always be used. In an
     * asymmetric cluster, we have to check whether the resource is still
     * allowed on the node, so we don't keep the resource somewhere it is no
     * longer explicitly enabled.
     */
    if (!pcmk_is_set(rsc->cluster->flags, pcmk_sched_symmetric_cluster)
        && (g_hash_table_lookup(rsc->allowed_nodes,
                                node->details->id) == NULL)) {
        pe_rsc_debug(rsc,
                     "Ignoring %s stickiness because the cluster is "
                     "asymmetric and %s is not explicitly allowed",
                     rsc->id, pe__node_name(node));
        return;
    }

    pe_rsc_debug(rsc, "Resource %s has %d stickiness on %s",
                 rsc->id, rsc->stickiness, pe__node_name(node));
    resource_location(rsc, node, rsc->stickiness, "stickiness", rsc->cluster);
}

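/*!
 * \internal
 * \brief Apply shutdown locks for all resources as appropriate
 *
 * \param[in,out] scheduler  Scheduler data
 */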
static void
apply_shutdown_locks(pcmk_scheduler_t *scheduler)
{
    if (!pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
        return;
    }
    for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
        pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;

        rsc->cmds->shutdown_lock(rsc);
    }
}

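/*!
 * \internal
 * \brief Calculate the number of available nodes in the cluster
 *
 * \param[in,out] scheduler  Scheduler data
 */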
static void
count_available_nodes(pcmk_scheduler_t *scheduler)
{
    if (pcmk_is_set(scheduler->flags, pcmk_sched_no_compat)) {
        return;
    }

    // @COMPAT for API backward compatibility only (cluster does not use value)
    for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
        pcmk_node_t *node = (pcmk_node_t *) iter->data;

        if ((node != NULL) && (node->weight >= 0) && node->details->online
            && (node->details->type != node_ping)) {
            scheduler->max_valid_nodes++;
        }
    }
    crm_trace("Online node count: %d", scheduler->max_valid_nodes);
}

/*
 * \internal
 * \brief Apply node-specific scheduling criteria
 *
 * After the CIB has been unpacked, process node-specific scheduling criteria
 * including shutdown locks, location constraints, resource stickiness,
 * migration thresholds, and exclusive resource discovery.
 */
static void
apply_node_criteria(pcmk_scheduler_t *scheduler)
{
    crm_trace("Applying node-specific scheduling criteria");
    apply_shutdown_locks(scheduler);
    count_available_nodes(scheduler);
    pcmk__apply_locations(scheduler);
    g_list_foreach(scheduler->resources, apply_stickiness, NULL);

    for (GList *node_iter = scheduler->nodes; node_iter != NULL;
         node_iter = node_iter->next) {
        for (GList *rsc_iter = scheduler->resources; rsc_iter != NULL;
             rsc_iter = rsc_iter->next) {
            check_failure_threshold(rsc_iter->data, node_iter->data);
            apply_exclusive_discovery(rsc_iter->data, node_iter->data);
        }
    }
}

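/*!
 * \internal
 * \brief Assign resources to nodes
 *
 * \param[in,out] scheduler  Scheduler data
 */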
static void
assign_resources(pcmk_scheduler_t *scheduler)
{
    GList *iter = NULL;

    crm_trace("Assigning resources to nodes");

    if (!pcmk__str_eq(scheduler->placement_strategy, "default",
                      pcmk__str_casei)) {
        pcmk__sort_resources(scheduler);
    }
    pcmk__show_node_capacities("Original", scheduler);

    if (pcmk_is_set(scheduler->flags, pcmk_sched_have_remote_nodes)) {
        /* Assign remote connection resources first (which will also assign any
         * colocation dependencies). If the connection is migrating, always
         * prefer the partial migration target.
         */
        for (iter = scheduler->resources; iter != NULL; iter = iter->next) {
            pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;

            if (rsc->is_remote_node) {
                pe_rsc_trace(rsc, "Assigning remote connection resource '%s'",
                             rsc->id);
                rsc->cmds->assign(rsc, rsc->partial_migration_target, true);
            }
        }
    }

    /* now do the rest of the resources */
    for (iter = scheduler->resources; iter != NULL; iter = iter->next) {
        pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;

        if (!rsc->is_remote_node) {
            pe_rsc_trace(rsc, "Assigning %s resource '%s'",
                         rsc->xml->name, rsc->id);
            rsc->cmds->assign(rsc, NULL, true);
        }
    }

    pcmk__show_node_capacities("Remaining", scheduler);
}

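/*!
 * \internal
 * \brief Schedule fail count clearing on online nodes if resource is orphaned
 *
 * \param[in,out] data       Resource to check
 * \param[in]     user_data  Ignored
 */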
static void
clear_failcounts_if_orphaned(gpointer data, gpointer user_data)
{
    pcmk_resource_t *rsc = data;

    if (!pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
        return;
    }
    crm_trace("Clear fail counts for orphaned resource %s", rsc->id);

    /* There's no need to recurse into rsc->children because those
     * should just be unassigned clone instances.
     */

    for (GList *iter = rsc->cluster->nodes; iter != NULL; iter = iter->next) {
        pcmk_node_t *node = (pcmk_node_t *) iter->data;
        pcmk_action_t *clear_op = NULL;

        if (!node->details->online) {
            continue;
        }
        if (pe_get_failcount(node, rsc, NULL, pcmk__fc_effective, NULL) == 0) {
            continue;
        }

        clear_op = pe__clear_failcount(rsc, node, "it is orphaned",
                                       rsc->cluster);

        /* We can't use order_action_then_stop() here because its
         * pcmk__ar_guest_allowed breaks things
         */
        pcmk__new_ordering(clear_op->rsc, NULL, clear_op, rsc, stop_key(rsc),
                           NULL, pcmk__ar_ordered, rsc->cluster);
    }
}

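/*!
 * \internal
 * \brief Schedule any resource actions needed
 *
 * \param[in,out] scheduler  Scheduler data
 */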
static void
schedule_resource_actions(pcmk_scheduler_t *scheduler)
{
    // Process deferred action checks
    pe__foreach_param_check(scheduler, check_params);
    pe__free_param_checks(scheduler);

    if (pcmk_is_set(scheduler->flags, pcmk_sched_probe_resources)) {
        crm_trace("Scheduling probes");
        pcmk__schedule_probes(scheduler);
    }

    if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_removed_resources)) {
        g_list_foreach(scheduler->resources, clear_failcounts_if_orphaned,
                       NULL);
    }

    crm_trace("Scheduling resource actions");
    for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
        pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;

        rsc->cmds->create_actions(rsc);
    }
}

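/*!
 * \internal
 * \brief Check whether a resource or any of its descendants are managed
 *
 * \param[in] rsc  Resource to check
 *
 * \return true if resource or any descendant is managed, otherwise false
 */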
static bool
is_managed(const pcmk_resource_t *rsc)
{
    if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
        return true;
    }
    for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
        if (is_managed((pcmk_resource_t *) iter->data)) {
            return true;
        }
    }
    return false;
}

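/*!
 * \internal
 * \brief Check whether any resources in the cluster are managed
 *
 * \param[in] scheduler  Scheduler data
 *
 * \return true if any resource is managed, otherwise false
 */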
static bool
any_managed_resources(const pcmk_scheduler_t *scheduler)
{
    for (const GList *iter = scheduler->resources;
         iter != NULL; iter = iter->next) {
        if (is_managed((const pcmk_resource_t *) iter->data)) {
            return true;
        }
    }
    return false;
}

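/*!
 * \internal
 * \brief Check whether a node requires fencing
 *
 * \param[in] node          Node to check
 * \param[in] have_managed  Whether any resource in cluster is managed
 *
 * \return true if \p node should be fenced, otherwise false
 */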
static bool
needs_fencing(const pcmk_node_t *node, bool have_managed)
{
    return have_managed && node->details->unclean
           && pe_can_fence(node->details->data_set, node);
}

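/*!
 * \internal
 * \brief Check whether a node requires shutdown
 *
 * \param[in] node  Node to check
 *
 * \return true if \p node should be shut down, otherwise false
 */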
static bool
needs_shutdown(const pcmk_node_t *node)
{
    if (pe__is_guest_or_remote_node(node)) {
        /* Do not send shutdown actions for Pacemaker Remote nodes.
         * @TODO We might come up with a good use for this in the future.
         */
        return false;
    }
    return node->details->online && node->details->shutdown;
}

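/*!
 * \internal
 * \brief Track and order non-DC fencing
 *
 * \param[in,out] list       List of existing non-DC fencing actions
 * \param[in,out] action     Fencing action to prepend to \p list
 * \param[in]     scheduler  Scheduler data
 *
 * \return (Possibly new) head of \p list
 */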
static GList *
add_nondc_fencing(GList *list, pcmk_action_t *action,
                  const pcmk_scheduler_t *scheduler)
{
    if (!pcmk_is_set(scheduler->flags, pcmk_sched_concurrent_fencing)
        && (list != NULL)) {
        /* Concurrent fencing is disabled, so order each non-DC
         * fencing in a chain. If there is any DC fencing or
         * shutdown, it will be ordered after the last action in the
         * chain later.
         */
        order_actions((pcmk_action_t *) list->data, action, pcmk__ar_ordered);
    }
    return g_list_prepend(list, action);
}

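/*!
 * \internal
 * \brief Schedule a node for fencing
 *
 * \param[in,out] node  Node that requires fencing
 *
 * \return Scheduled fencing action
 */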
static pcmk_action_t *
schedule_fencing(pcmk_node_t *node)
{
    pcmk_action_t *fencing = pe_fence_op(node, NULL, FALSE, "node is unclean",
                                         FALSE, node->details->data_set);

    pe_warn("Scheduling node %s for fencing", pe__node_name(node));
    pcmk__order_vs_fence(fencing, node->details->data_set);
    return fencing;
}

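/*!
 * \internal
 * \brief Create and order node fencing and shutdown actions
 *
 * \param[in,out] scheduler  Scheduler data
 */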
static void
schedule_fencing_and_shutdowns(pcmk_scheduler_t *scheduler)
{
    pcmk_action_t *dc_down = NULL;
    bool integrity_lost = false;
    bool have_managed = any_managed_resources(scheduler);
    GList *fencing_ops = NULL;
    GList *shutdown_ops = NULL;

    crm_trace("Scheduling fencing and shutdowns as needed");
    if (!have_managed) {
        crm_notice("No fencing will be done until there are resources "
                   "to manage");
    }

    // Check each node for whether it needs fencing or shutdown
    for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
        pcmk_node_t *node = (pcmk_node_t *) iter->data;
        pcmk_action_t *fencing = NULL;

        /* Guest nodes are "fenced" by recovering their container resource,
         * so handle them separately.
         */
        if (pe__is_guest_node(node)) {
            if (node->details->remote_requires_reset && have_managed
                && pe_can_fence(scheduler, node)) {
                pcmk__fence_guest(node);
            }
            continue;
        }

        if (needs_fencing(node, have_managed)) {
            fencing = schedule_fencing(node);

            // Track DC and non-DC fence actions separately
            if (node->details->is_dc) {
                dc_down = fencing;
            } else {
                fencing_ops = add_nondc_fencing(fencing_ops, fencing,
                                                scheduler);
            }

        } else if (needs_shutdown(node)) {
            pcmk_action_t *down_op = pcmk__new_shutdown_action(node);

            // Track DC and non-DC shutdown actions separately
            if (node->details->is_dc) {
                dc_down = down_op;
            } else {
                shutdown_ops = g_list_prepend(shutdown_ops, down_op);
            }
        }

        if ((fencing == NULL) && node->details->unclean) {
            integrity_lost = true;
            pe_warn("Node %s is unclean but cannot be fenced",
                    pe__node_name(node));
        }
    }

    if (integrity_lost) {
        if (!pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
            pe_warn("Resource functionality and data integrity cannot be "
                    "guaranteed (configure, enable, and test fencing to "
                    "correct this)");

        } else if (!pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) {
            crm_notice("Unclean nodes will not be fenced until quorum is "
                       "attained or no-quorum-policy is set to ignore");
        }
    }

    if (dc_down != NULL) {
        /* Order any non-DC shutdowns before any DC shutdown, to avoid repeated
         * DC elections. However, we don't want to order non-DC shutdowns before
         * a DC *fencing*, because even though we don't want a node that's
         * shutting down to become DC, the DC fencing could be ordered before a
         * clone stop that's also ordered before the shutdowns, thus leading to
         * a graph loop.
         */
        if (pcmk__str_eq(dc_down->task, PCMK_ACTION_DO_SHUTDOWN,
                         pcmk__str_none)) {
            pcmk__order_after_each(dc_down, shutdown_ops);
        }

        // Order any non-DC fencing before any DC fencing or shutdown

        if (pcmk_is_set(scheduler->flags, pcmk_sched_concurrent_fencing)) {
            /* With concurrent fencing, order each non-DC fencing action
             * separately before any DC fencing or shutdown.
             */
            pcmk__order_after_each(dc_down, fencing_ops);
        } else if (fencing_ops != NULL) {
            /* Without concurrent fencing, the non-DC fencing actions are
             * already ordered relative to each other, so we just need to order
             * the DC fencing after the last action in the chain (which is the
             * first item in the list).
             */
            order_actions((pcmk_action_t *) fencing_ops->data, dc_down,
                          pcmk__ar_ordered);
        }
    }
    g_list_free(fencing_ops);
    g_list_free(shutdown_ops);
}

static void
log_resource_details(pcmk_scheduler_t *scheduler)
{
    pcmk__output_t *out = scheduler->priv;
    GList *all = NULL;

    /* Due to the `crm_mon --node=` feature, out->message() for all the
     * resource-related messages expects a list of nodes that we are allowed to
     * output information for. Here, we create a wildcard to match all nodes.
     */
    all = g_list_prepend(all, (gpointer) "*");

    for (GList *item = scheduler->resources; item != NULL; item = item->next) {
        pcmk_resource_t *rsc = (pcmk_resource_t *) item->data;

        // Log all resources except inactive orphans
        if (!pcmk_is_set(rsc->flags, pcmk_rsc_removed)
            || (rsc->role != pcmk_role_stopped)) {
            out->message(out, crm_map_element_name(rsc->xml), 0, rsc, all, all);
        }
    }

    g_list_free(all);
}

static void
log_all_actions(pcmk_scheduler_t *scheduler)
{
    /* This only ever outputs to the log, so ignore whatever output object was
     * previously set and just log instead.
     */
    pcmk__output_t *prev_out = scheduler->priv;
    pcmk__output_t *out = NULL;

    if (pcmk__log_output_new(&out) != pcmk_rc_ok) {
        return;
    }

    pe__register_messages(out);
    pcmk__register_lib_messages(out);
    pcmk__output_set_log_level(out, LOG_NOTICE);
    scheduler->priv = out;

    out->begin_list(out, NULL, NULL, "Actions");
    pcmk__output_actions(scheduler);
    out->end_list(out);
    out->finish(out, CRM_EX_OK, true, NULL);
    pcmk__output_free(out);

    scheduler->priv = prev_out;
}

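/*!
 * \internal
 * \brief Log all required but unrunnable actions at trace level
 *
 * \param[in] scheduler  Scheduler data
 */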
static void
log_unrunnable_actions(const pcmk_scheduler_t *scheduler)
{
    const uint64_t flags = pcmk_action_optional
                           |pcmk_action_runnable
                           |pcmk_action_pseudo;

    crm_trace("Required but unrunnable actions:");
    for (const GList *iter = scheduler->actions;
         iter != NULL; iter = iter->next) {

        const pcmk_action_t *action = (const pcmk_action_t *) iter->data;

        if (!pcmk_any_flags_set(action->flags, flags)) {
            pcmk__log_action("\t", action, true);
        }
    }
}

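/*!
 * \internal
 * \brief Unpack the CIB for scheduling
 *
 * \param[in,out] cib        CIB XML to unpack (must not be NULL unless the
 *                           status was already unpacked)
 * \param[in]     flags      Scheduler flags to set in addition to defaults
 * \param[in,out] scheduler  Scheduler data
 */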
static void
unpack_cib(xmlNode *cib, unsigned long long flags, pcmk_scheduler_t *scheduler)
{
    const char *localhost_save = NULL;

    if (pcmk_is_set(scheduler->flags, pcmk_sched_have_status)) {
        crm_trace("Reusing previously calculated cluster status");
        pe__set_working_set_flags(scheduler, flags);
        return;
    }

    if (scheduler->localhost) {
        localhost_save = scheduler->localhost;
    }

    CRM_ASSERT(cib != NULL);
    crm_trace("Calculating cluster status");

    /* This will zero the entire struct without freeing anything first, so
     * callers should never call pcmk__schedule_actions() with a populated data
     * set unless pcmk_sched_have_status is set (i.e. cluster_status() was
     * previously called, whether directly or via pcmk__schedule_actions()).
     */
    set_working_set_defaults(scheduler);

    if (localhost_save) {
        scheduler->localhost = localhost_save;
    }

    pe__set_working_set_flags(scheduler, flags);
    scheduler->input = cib;
    cluster_status(scheduler); // Sets pcmk_sched_have_status
}

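/*!
 * \internal
 * \brief Run the scheduler for a given CIB
 *
 * \param[in,out] cib        CIB XML to use as scheduler input
 * \param[in]     flags      Scheduler flags to set in addition to defaults
 * \param[in,out] scheduler  Scheduler data
 */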
void
pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
                       pcmk_scheduler_t *scheduler)
{
    unpack_cib(cib, flags, scheduler);
    pcmk__set_assignment_methods(scheduler);
    pcmk__apply_node_health(scheduler);
    pcmk__unpack_constraints(scheduler);
    if (pcmk_is_set(scheduler->flags, pcmk_sched_validate_only)) {
        return;
    }

    if (!pcmk_is_set(scheduler->flags, pcmk_sched_location_only)
        && pcmk__is_daemon) {
        log_resource_details(scheduler);
    }

    apply_node_criteria(scheduler);

    if (pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
        return;
    }

    pcmk__create_internal_constraints(scheduler);
    pcmk__handle_rsc_config_changes(scheduler);
    assign_resources(scheduler);
    schedule_resource_actions(scheduler);

    /* Remote ordering constraints need to happen prior to calculating fencing
     * because it is one more place we can mark nodes as needing fencing.
     */
    pcmk__order_remote_connection_actions(scheduler);

    schedule_fencing_and_shutdowns(scheduler);
    pcmk__apply_orderings(scheduler);
    log_all_actions(scheduler);
    pcmk__create_graph(scheduler);

    if (get_crm_log_level() == LOG_TRACE) {
        log_unrunnable_actions(scheduler);
    }
}