pacemaker  2.0.3-4b1f869f0f
Scalable High-Availability cluster resource manager
pcmk_sched_native.c
1 /*
2  * Copyright 2004-2019 the Pacemaker project contributors
3  *
4  * The version control history for this file may have further details.
5  *
6  * This source code is licensed under the GNU General Public License version 2
7  * or later (GPLv2+) WITHOUT ANY WARRANTY.
8  */
9 
10 #include <crm_internal.h>
11 
12 #include <crm/pengine/rules.h>
13 #include <crm/msg_xml.h>
14 #include <pacemaker-internal.h>
15 #include <crm/services.h>
16 
17 // The controller removes the resource from the CIB, making this redundant
18 // #define DELETE_THEN_REFRESH 1
19 
20 #define INFINITY_HACK (INFINITY * -100)
21 
22 #define VARIANT_NATIVE 1
23 #include <lib/pengine/variant.h>
24 
25 void native_rsc_colocation_rh_must(resource_t * rsc_lh, gboolean update_lh,
26  resource_t * rsc_rh, gboolean update_rh);
27 
28 void native_rsc_colocation_rh_mustnot(resource_t * rsc_lh, gboolean update_lh,
29  resource_t * rsc_rh, gboolean update_rh);
30 
31 static void Recurring(resource_t *rsc, action_t *start, node_t *node,
32  pe_working_set_t *data_set);
33 static void RecurringOp(resource_t *rsc, action_t *start, node_t *node,
34  xmlNode *operation, pe_working_set_t *data_set);
35 static void Recurring_Stopped(resource_t *rsc, action_t *start, node_t *node,
36  pe_working_set_t *data_set);
37 static void RecurringOp_Stopped(resource_t *rsc, action_t *start, node_t *node,
38  xmlNode *operation, pe_working_set_t *data_set);
39 
40 void ReloadRsc(resource_t * rsc, node_t *node, pe_working_set_t * data_set);
41 gboolean DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set);
42 gboolean StopRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set);
43 gboolean StartRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set);
44 gboolean DemoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set);
45 gboolean PromoteRsc(resource_t * rsc, node_t * next, gboolean optional,
46  pe_working_set_t * data_set);
47 gboolean RoleError(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set);
48 gboolean NullOp(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set);
49 
50 /* *INDENT-OFF* */
51 enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
52 /* Current State */
53 /* Next State:   Unknown           Stopped           Started           Slave             Master */
54  /* Unknown */ { RSC_ROLE_UNKNOWN, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, },
55  /* Stopped */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, },
56  /* Started */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, },
57  /* Slave */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, },
58  /* Master */ { RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, },
59 };
60 
61 gboolean (*rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX]) (resource_t *, node_t *, gboolean, pe_working_set_t *) = {
62 /* Current State */
63 /* Next State: Unknown Stopped Started Slave Master */
64  /* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, },
65  /* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, },
66  /* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, },
67  /* Slave */ { RoleError, StopRsc, StopRsc, NullOp, PromoteRsc, },
68  /* Master */ { RoleError, DemoteRsc, DemoteRsc, DemoteRsc, NullOp, },
69 };
70 /* *INDENT-ON* */
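
These two tables drive every role change the scheduler computes: rsc_state_matrix[current][target] yields the next intermediate role on the way to a target role, and rsc_action_matrix[current][next] yields the function that schedules that single step. A minimal, self-contained sketch of the walk that native_create_actions() performs further below (the enum, table, and names here are illustrative stand-ins, not Pacemaker API):

#include <stdio.h>

enum role { UNKNOWN, STOPPED, STARTED, SLAVE, MASTER, ROLE_MAX };

static const char *role_name[ROLE_MAX] = {
    "Unknown", "Stopped", "Started", "Slave", "Master"
};

/* Same shape as rsc_state_matrix: the next intermediate role on the way
 * from the row's current role to the column's target role. */
static const enum role next_step[ROLE_MAX][ROLE_MAX] = {
    { UNKNOWN, STOPPED, STOPPED, STOPPED, STOPPED },
    { STOPPED, STOPPED, STARTED, SLAVE,   SLAVE   },
    { STOPPED, STOPPED, STARTED, SLAVE,   MASTER  },
    { STOPPED, STOPPED, STOPPED, SLAVE,   MASTER  },
    { STOPPED, SLAVE,   SLAVE,   SLAVE,   MASTER  },
};

int main(void)
{
    enum role role = MASTER;            /* e.g. a master being shut down */
    const enum role target = STOPPED;

    /* native_create_actions() runs this same walk, additionally calling
     * rsc_action_matrix[role][next] (here: DemoteRsc, then StopRsc). */
    while (role != target) {
        enum role next = next_step[role][target];

        printf("%s -> %s\n", role_name[role], role_name[next]);
        role = next;
    }
    return 0;   /* prints "Master -> Slave" then "Slave -> Stopped" */
}
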
71 
72 static gboolean
73 native_choose_node(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set)
74 {
75  GListPtr nodes = NULL;
76  node_t *chosen = NULL;
77  node_t *best = NULL;
78  int multiple = 1;
79  int length = 0;
80  gboolean result = FALSE;
81 
82  process_utilization(rsc, &prefer, data_set);
83 
84  if (is_not_set(rsc->flags, pe_rsc_provisional)) {
85  return rsc->allocated_to ? TRUE : FALSE;
86  }
87 
88  // Sort allowed nodes by weight
89  if (rsc->allowed_nodes) {
90  length = g_hash_table_size(rsc->allowed_nodes);
91  }
92  if (length > 0) {
93  nodes = g_hash_table_get_values(rsc->allowed_nodes);
94  nodes = sort_nodes_by_weight(nodes, pe__current_node(rsc), data_set);
95 
96  // First node in sorted list has the best score
97  best = g_list_nth_data(nodes, 0);
98  }
99 
100  if (prefer && nodes) {
101  chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);
102 
103  if (chosen == NULL) {
104  pe_rsc_trace(rsc, "Preferred node %s for %s was unknown",
105  prefer->details->uname, rsc->id);
106 
107  /* Favor the preferred node as long as its weight is at least as good as
108  * the best allowed node's.
109  *
110  * An alternative would be to favor the preferred node even if the best
111  * node is better, when the best node's weight is less than INFINITY.
112  */
113  } else if ((chosen->weight < 0) || (chosen->weight < best->weight)) {
114  pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable",
115  chosen->details->uname, rsc->id);
116  chosen = NULL;
117 
118  } else if (!can_run_resources(chosen)) {
119  pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable",
120  chosen->details->uname, rsc->id);
121  chosen = NULL;
122 
123  } else {
124  pe_rsc_trace(rsc,
125  "Chose preferred node %s for %s (ignoring %d candidates)",
126  chosen->details->uname, rsc->id, length);
127  }
128  }
129 
130  if ((chosen == NULL) && nodes) {
131  /* Either there is no preferred node, or the preferred node is not
132  * available, but there are other nodes allowed to run the resource.
133  */
134 
135  chosen = best;
136  pe_rsc_trace(rsc, "Chose node %s for %s from %d candidates",
137  chosen ? chosen->details->uname : "<none>", rsc->id, length);
138 
139  if (!pe_rsc_is_unique_clone(rsc->parent)
140  && chosen && (chosen->weight > 0) && can_run_resources(chosen)) {
141  /* If the resource is already running on a node, prefer that node if
142  * it is just as good as the chosen node.
143  *
144  * We don't do this for unique clone instances, because
145  * distribute_children() has already assigned instances to their
146  * running nodes when appropriate, and if we get here, we don't want
147  * remaining unallocated instances to prefer a node that's already
148  * running another instance.
149  */
150  node_t *running = pe__current_node(rsc);
151 
152  if (running && (can_run_resources(running) == FALSE)) {
153  pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources",
154  rsc->id, running->details->uname);
155  } else if (running) {
156  for (GList *iter = nodes->next; iter; iter = iter->next) {
157  node_t *tmp = (node_t *) iter->data;
158 
159  if (tmp->weight != chosen->weight) {
160  // The nodes are sorted by weight, so no more are equal
161  break;
162  }
163  if (tmp->details == running->details) {
164  // Scores are equal, so prefer the current node
165  chosen = tmp;
166  }
167  multiple++;
168  }
169  }
170  }
171  }
172 
173  if (multiple > 1) {
174  static char score[33];
175  int log_level = (chosen->weight >= INFINITY)? LOG_WARNING : LOG_INFO;
176 
177  score2char_stack(chosen->weight, score, sizeof(score));
178  do_crm_log(log_level,
179  "Chose node %s for %s from %d nodes with score %s",
180  chosen->details->uname, rsc->id, multiple, score);
181  }
182 
183  result = native_assign_node(rsc, nodes, chosen, FALSE);
184  g_list_free(nodes);
185  return result;
186 }
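
Concretely (node names and weights invented): if the sorted candidates are node2:100, node1:100, node3:50 and the resource is already active on node1, `best` is node2, but the equal-weight scan above switches `chosen` to node1 and leaves `multiple` at 2, so the resource stays where it is and the tie is logged, at LOG_INFO here, or LOG_WARNING when the tied score is INFINITY, since a tie at INFINITY suggests the configuration does not fully determine placement.
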
187 
188 static int
189 node_list_attr_score(GHashTable * list, const char *attr, const char *value)
190 {
191  GHashTableIter iter;
192  node_t *node = NULL;
193  int best_score = -INFINITY;
194  const char *best_node = NULL;
195 
196  if (attr == NULL) {
197  attr = CRM_ATTR_UNAME;
198  }
199 
200  g_hash_table_iter_init(&iter, list);
201  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
202  int weight = node->weight;
203 
204  if (can_run_resources(node) == FALSE) {
205  weight = -INFINITY;
206  }
207  if (weight > best_score || best_node == NULL) {
208  const char *tmp = pe_node_attribute_raw(node, attr);
209 
210  if (safe_str_eq(value, tmp)) {
211  best_score = weight;
212  best_node = node->details->uname;
213  }
214  }
215  }
216 
217  if (safe_str_neq(attr, CRM_ATTR_UNAME)) {
218  crm_info("Best score for %s=%s was %s with %d",
219  attr, value, best_node ? best_node : "<none>", best_score);
220  }
221 
222  return best_score;
223 }
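
For example, with attr="rack" and value="rack1" (illustrative names), a list containing nodeA (rack1, weight 100), nodeB (rack1, weight 50), and nodeC (rack2, weight 200) yields 100: nodeC never matches the attribute value despite its higher weight, and any node rejected by can_run_resources() is treated as -INFINITY before comparison.
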
224 
225 static void
226 node_hash_update(GHashTable * list1, GHashTable * list2, const char *attr, float factor,
227  gboolean only_positive)
228 {
229  int score = 0;
230  int new_score = 0;
231  GHashTableIter iter;
232  node_t *node = NULL;
233 
234  if (attr == NULL) {
235  attr = CRM_ATTR_UNAME;
236  }
237 
238  g_hash_table_iter_init(&iter, list1);
239  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
240  float weight_f = 0;
241  int weight = 0;
242 
243  CRM_LOG_ASSERT(node != NULL);
244  if(node == NULL) { continue; };
245 
246  score = node_list_attr_score(list2, attr, pe_node_attribute_raw(node, attr));
247 
248  weight_f = factor * score;
249  /* Round the number */
250  /* http://c-faq.com/fp/round.html */
251  weight = (int)(weight_f < 0 ? weight_f - 0.5 : weight_f + 0.5);
252 
253  new_score = merge_weights(weight, node->weight);
254 
255  if (factor < 0 && score < 0) {
256  /* Negative preference for a node with a negative score
257  * should not become a positive preference
258  *
259  * TODO - Decide if we want to filter only if weight == -INFINITY
260  *
261  */
262  crm_trace("%s: Filtering %d + %f*%d (factor * score)",
263  node->details->uname, node->weight, factor, score);
264 
265  } else if (node->weight == INFINITY_HACK) {
266  crm_trace("%s: Filtering %d + %f*%d (node < 0)",
267  node->details->uname, node->weight, factor, score);
268 
269  } else if (only_positive && new_score < 0 && node->weight > 0) {
270  node->weight = INFINITY_HACK;
271  crm_trace("%s: Filtering %d + %f*%d (score > 0)",
272  node->details->uname, node->weight, factor, score);
273 
274  } else if (only_positive && new_score < 0 && node->weight == 0) {
275  crm_trace("%s: Filtering %d + %f*%d (score == 0)",
276  node->details->uname, node->weight, factor, score);
277 
278  } else {
279  crm_trace("%s: %d + %f*%d", node->details->uname, node->weight, factor, score);
280  node->weight = new_score;
281  }
282  }
283 }
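
The cast-based rounding above is the classic C-FAQ idiom for symmetric rounding (half away from zero), which keeps positive and negative preferences from drifting toward zero differently. A standalone sketch of the same arithmetic, with made-up factor/score values:

#include <stdio.h>

/* The same idiom as node_hash_update(): scale a node score by a colocation
 * factor, then round half away from zero (http://c-faq.com/fp/round.html). */
static int scaled_weight(float factor, int score)
{
    float w = factor * score;

    return (int)(w < 0 ? w - 0.5 : w + 0.5);
}

int main(void)
{
    printf("%d\n", scaled_weight(0.5f, 3));     /*  1.5 rounds to  2 */
    printf("%d\n", scaled_weight(-0.5f, 3));    /* -1.5 rounds to -2 */
    printf("%d\n", scaled_weight(0.25f, -2));   /* -0.5 rounds to -1 */
    return 0;
}
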
284 
285 GHashTable *
286 node_hash_dup(GHashTable * hash)
287 {
288  /* Hack! */
289  GListPtr list = g_hash_table_get_values(hash);
290  GHashTable *result = node_hash_from_list(list);
291 
292  g_list_free(list);
293  return result;
294 }
295 
296 GHashTable *
297 native_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr,
298  float factor, enum pe_weights flags)
299 {
300  return rsc_merge_weights(rsc, rhs, nodes, attr, factor, flags);
301 }
302 
303 GHashTable *
304 rsc_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr,
305  float factor, enum pe_weights flags)
306 {
307  GHashTable *work = NULL;
308  int multiplier = 1;
309 
310  if (factor < 0) {
311  multiplier = -1;
312  }
313 
314  if (is_set(rsc->flags, pe_rsc_merging)) {
315  pe_rsc_info(rsc, "%s: Breaking dependency loop at %s", rhs, rsc->id);
316  return nodes;
317  }
318 
319  set_bit(rsc->flags, pe_rsc_merging);
320 
321  if (is_set(flags, pe_weights_init)) {
322  if (rsc->variant == pe_group && rsc->children) {
323  GListPtr last = rsc->children;
324 
325  while (last->next != NULL) {
326  last = last->next;
327  }
328 
329  pe_rsc_trace(rsc, "Merging %s as a group %p %p", rsc->id, rsc->children, last);
330  work = rsc_merge_weights(last->data, rhs, NULL, attr, factor, flags);
331 
332  } else {
333  work = node_hash_dup(rsc->allowed_nodes);
334  }
335  clear_bit(flags, pe_weights_init);
336 
337  } else if (rsc->variant == pe_group && rsc->children) {
338  GListPtr iter = rsc->children;
339 
340  pe_rsc_trace(rsc, "%s: Combining scores from %d children of %s", rhs, g_list_length(iter), rsc->id);
341  work = node_hash_dup(nodes);
342  for(iter = rsc->children; iter->next != NULL; iter = iter->next) {
343  work = rsc_merge_weights(iter->data, rhs, work, attr, factor, flags);
344  }
345 
346  } else {
347  pe_rsc_trace(rsc, "%s: Combining scores from %s", rhs, rsc->id);
348  work = node_hash_dup(nodes);
349  node_hash_update(work, rsc->allowed_nodes, attr, factor,
350  is_set(flags, pe_weights_positive));
351  }
352 
353  if (is_set(flags, pe_weights_rollback) && can_run_any(work) == FALSE) {
354  pe_rsc_info(rsc, "%s: Rolling back scores from %s", rhs, rsc->id);
355  g_hash_table_destroy(work);
356  clear_bit(rsc->flags, pe_rsc_merging);
357  return nodes;
358  }
359 
360  if (can_run_any(work)) {
361  GListPtr gIter = NULL;
362 
363  if (is_set(flags, pe_weights_forward)) {
364  gIter = rsc->rsc_cons;
365  crm_trace("Checking %d additional colocation constraints", g_list_length(gIter));
366 
367  } else if(rsc->variant == pe_group && rsc->children) {
368  GListPtr last = rsc->children;
369 
370  while (last->next != NULL) {
371  last = last->next;
372  }
373 
374  gIter = ((resource_t*)last->data)->rsc_cons_lhs;
375  crm_trace("Checking %d additional optional group colocation constraints from %s",
376  g_list_length(gIter), ((resource_t*)last->data)->id);
377 
378  } else {
379  gIter = rsc->rsc_cons_lhs;
380  crm_trace("Checking %d additional optional colocation constraints %s", g_list_length(gIter), rsc->id);
381  }
382 
383  for (; gIter != NULL; gIter = gIter->next) {
384  resource_t *other = NULL;
385  rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;
386 
387  if (is_set(flags, pe_weights_forward)) {
388  other = constraint->rsc_rh;
389  } else {
390  other = constraint->rsc_lh;
391  }
392 
393  pe_rsc_trace(rsc, "Applying %s (%s)", constraint->id, other->id);
394  work = rsc_merge_weights(other, rhs, work, constraint->node_attribute,
395  multiplier * (float)constraint->score / INFINITY, flags|pe_weights_rollback);
396  dump_node_scores(LOG_TRACE, NULL, rhs, work);
397  }
398 
399  }
400 
401  if (is_set(flags, pe_weights_positive)) {
402  node_t *node = NULL;
403  GHashTableIter iter;
404 
405  g_hash_table_iter_init(&iter, work);
406  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
407  if (node->weight == INFINITY_HACK) {
408  node->weight = 1;
409  }
410  }
411  }
412 
413  if (nodes) {
414  g_hash_table_destroy(nodes);
415  }
416 
417  clear_bit(rsc->flags, pe_rsc_merging);
418  return work;
419 }
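
Note how chained colocations are weighted: `(float)constraint->score / INFINITY` maps a score of INFINITY (1000000) to a factor of 1.0 and, say, 500000 to 0.5, so a node weight of 100 contributed by the other resource adds 50 after rounding. `multiplier` flips the sign when this call's own factor was negative, so preferences reached through an anti-colocation propagate as penalties, and pe_weights_rollback discards a recursive merge entirely if it would leave no runnable node.
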
420 
421 static inline bool
422 node_has_been_unfenced(node_t *node)
423 {
424  const char *unfenced = pe_node_attribute_raw(node, CRM_ATTR_UNFENCED);
425 
426  return unfenced && strcmp("0", unfenced);
427 }
428 
429 static inline bool
430 is_unfence_device(resource_t *rsc, pe_working_set_t *data_set)
431 {
432  return is_set(rsc->flags, pe_rsc_fence_device)
433  && is_set(data_set->flags, pe_flag_enable_unfencing);
434 }
435 
436 node_t *
437 native_color(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set)
438 {
439  GListPtr gIter = NULL;
440  int alloc_details = scores_log_level + 1;
441 
442  if (rsc->parent && is_not_set(rsc->parent->flags, pe_rsc_allocating)) {
443  /* never allocate children on their own */
444  pe_rsc_debug(rsc, "Escalating allocation of %s to its parent: %s", rsc->id,
445  rsc->parent->id);
446  rsc->parent->cmds->allocate(rsc->parent, prefer, data_set);
447  }
448 
449  if (is_not_set(rsc->flags, pe_rsc_provisional)) {
450  return rsc->allocated_to;
451  }
452 
453  if (is_set(rsc->flags, pe_rsc_allocating)) {
454  pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
455  return NULL;
456  }
457 
458  set_bit(rsc->flags, pe_rsc_allocating);
459  dump_node_scores(alloc_details, rsc, "Pre-alloc", rsc->allowed_nodes);
460 
461  for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) {
462  rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;
463 
464  GHashTable *archive = NULL;
465  resource_t *rsc_rh = constraint->rsc_rh;
466 
467  pe_rsc_trace(rsc, "%s: Pre-Processing %s (%s, %d, %s)",
468  rsc->id, constraint->id, rsc_rh->id,
469  constraint->score, role2text(constraint->role_lh));
470  if (constraint->role_lh >= RSC_ROLE_MASTER
471  || (constraint->score < 0 && constraint->score > -INFINITY)) {
472  archive = node_hash_dup(rsc->allowed_nodes);
473  }
474  rsc_rh->cmds->allocate(rsc_rh, NULL, data_set);
475  rsc->cmds->rsc_colocation_lh(rsc, rsc_rh, constraint, data_set);
476  if (archive && can_run_any(rsc->allowed_nodes) == FALSE) {
477  pe_rsc_info(rsc, "%s: Rolling back scores from %s", rsc->id, rsc_rh->id);
478  g_hash_table_destroy(rsc->allowed_nodes);
479  rsc->allowed_nodes = archive;
480  archive = NULL;
481  }
482  if (archive) {
483  g_hash_table_destroy(archive);
484  }
485  }
486 
487  dump_node_scores(alloc_details, rsc, "Post-coloc", rsc->allowed_nodes);
488 
489  for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
490  rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;
491 
492  rsc->allowed_nodes =
493  constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes,
494  constraint->node_attribute,
495  (float)constraint->score / INFINITY,
496  pe_weights_rollback);
497  }
498 
499  if (rsc->next_role == RSC_ROLE_STOPPED) {
500  pe_rsc_trace(rsc, "Making sure %s doesn't get allocated", rsc->id);
501  /* make sure it doesn't come up again */
502  resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, data_set);
503 
504  } else if(rsc->next_role > rsc->role
505  && is_set(data_set->flags, pe_flag_have_quorum) == FALSE
506  && data_set->no_quorum_policy == no_quorum_freeze) {
507  crm_notice("Resource %s cannot be elevated from %s to %s: no-quorum-policy=freeze",
508  rsc->id, role2text(rsc->role), role2text(rsc->next_role));
509  rsc->next_role = rsc->role;
510  }
511 
512  dump_node_scores(show_scores ? 0 : scores_log_level, rsc, __FUNCTION__,
513  rsc->allowed_nodes);
514  if (is_set(data_set->flags, pe_flag_stonith_enabled)
515  && is_set(data_set->flags, pe_flag_have_stonith_resource) == FALSE) {
516  resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, data_set);
517  }
518 
519  if (is_not_set(rsc->flags, pe_rsc_managed)) {
520  const char *reason = NULL;
521  node_t *assign_to = NULL;
522 
523  rsc->next_role = rsc->role;
524  assign_to = pe__current_node(rsc);
525  if (assign_to == NULL) {
526  reason = "inactive";
527  } else if (rsc->role == RSC_ROLE_MASTER) {
528  reason = "master";
529  } else if (is_set(rsc->flags, pe_rsc_failed)) {
530  reason = "failed";
531  } else {
532  reason = "active";
533  }
534  pe_rsc_info(rsc, "Unmanaged resource %s allocated to %s: %s", rsc->id,
535  (assign_to? assign_to->details->uname : "no node"), reason);
536  native_assign_node(rsc, NULL, assign_to, TRUE);
537 
538  } else if (is_set(data_set->flags, pe_flag_stop_everything)) {
539  pe_rsc_debug(rsc, "Forcing %s to stop", rsc->id);
540  native_assign_node(rsc, NULL, NULL, TRUE);
541 
542  } else if (is_set(rsc->flags, pe_rsc_provisional)
543  && native_choose_node(rsc, prefer, data_set)) {
544  pe_rsc_trace(rsc, "Allocated resource %s to %s", rsc->id,
545  rsc->allocated_to->details->uname);
546 
547  } else if (rsc->allocated_to == NULL) {
548  if (is_not_set(rsc->flags, pe_rsc_orphan)) {
549  pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
550  } else if (rsc->running_on != NULL) {
551  pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id);
552  }
553 
554  } else {
555  pe_rsc_debug(rsc, "Pre-Allocated resource %s to %s", rsc->id,
556  rsc->allocated_to->details->uname);
557  }
558 
559  clear_bit(rsc->flags, pe_rsc_allocating);
560 
561  if (rsc->is_remote_node) {
562  node_t *remote_node = pe_find_node(data_set->nodes, rsc->id);
563 
564  CRM_ASSERT(remote_node != NULL);
565  if (rsc->allocated_to && rsc->next_role != RSC_ROLE_STOPPED) {
566  crm_trace("Setting Pacemaker Remote node %s to ONLINE",
567  remote_node->details->id);
568  remote_node->details->online = TRUE;
569  /* We shouldn't consider an unseen remote-node unclean if we are going
570  * to try and connect to it. Otherwise we get an unnecessary fence */
571  if (remote_node->details->unseen == TRUE) {
572  remote_node->details->unclean = FALSE;
573  }
574 
575  } else {
576  crm_trace("Setting Pacemaker Remote node %s to SHUTDOWN (next role %s, %sallocated)",
577  remote_node->details->id, role2text(rsc->next_role),
578  (rsc->allocated_to? "" : "un"));
579  remote_node->details->shutdown = TRUE;
580  }
581  }
582 
583  return rsc->allocated_to;
584 }
585 
586 static gboolean
587 is_op_dup(resource_t *rsc, const char *name, guint interval_ms)
588 {
589  gboolean dup = FALSE;
590  const char *id = NULL;
591  const char *value = NULL;
592  xmlNode *operation = NULL;
593  guint interval2_ms = 0;
594 
595  CRM_ASSERT(rsc);
596  for (operation = __xml_first_child_element(rsc->ops_xml); operation != NULL;
597  operation = __xml_next_element(operation)) {
598 
599  if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
600  value = crm_element_value(operation, "name");
601  if (safe_str_neq(value, name)) {
602  continue;
603  }
604 
605  value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
606  interval2_ms = crm_parse_interval_spec(value);
607  if (interval_ms != interval2_ms) {
608  continue;
609  }
610 
611  if (id == NULL) {
612  id = ID(operation);
613 
614  } else {
615  crm_config_err("Operation %s is a duplicate of %s", ID(operation), id);
616  crm_config_err
617  ("Do not use the same (name, interval) combination more than once per resource");
618  dup = TRUE;
619  }
620  }
621  }
622 
623  return dup;
624 }
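
The `<op>` elements being compared come from the resource's operations section in the CIB; two entries are duplicates when their names match and their intervals normalize to the same number of milliseconds, whatever units were written. A hedged illustration (ids and values are invented):

    <operations>
      <op id="db-monitor-10s" name="monitor" interval="10s"/>
      <op id="db-monitor-dup" name="monitor" interval="10000ms"/> <!-- flagged: same (name, interval) -->
    </operations>
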
625 
626 static bool
627 op_cannot_recur(const char *name)
628 {
629  return safe_str_eq(name, RSC_STOP)
630  || safe_str_eq(name, RSC_START)
631  || safe_str_eq(name, RSC_DEMOTE)
632  || safe_str_eq(name, RSC_PROMOTE);
633 }
634 
635 static void
636 RecurringOp(resource_t * rsc, action_t * start, node_t * node,
637  xmlNode * operation, pe_working_set_t * data_set)
638 {
639  char *key = NULL;
640  const char *name = NULL;
641  const char *role = NULL;
642  const char *interval_spec = NULL;
643  const char *node_uname = node? node->details->uname : "n/a";
644 
645  guint interval_ms = 0;
646  action_t *mon = NULL;
647  gboolean is_optional = TRUE;
648  GListPtr possible_matches = NULL;
649 
650  CRM_ASSERT(rsc);
651 
652  /* Only process operations that do not have role="Stopped" */
653  role = crm_element_value(operation, "role");
654  if (role && text2role(role) == RSC_ROLE_STOPPED) {
655  return;
656  }
657 
658  interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
659  interval_ms = crm_parse_interval_spec(interval_spec);
660  if (interval_ms == 0) {
661  return;
662  }
663 
664  name = crm_element_value(operation, "name");
665  if (is_op_dup(rsc, name, interval_ms)) {
666  crm_trace("Not creating duplicate recurring action %s for %dms %s",
667  ID(operation), interval_ms, name);
668  return;
669  }
670 
671  if (op_cannot_recur(name)) {
672  crm_config_err("Ignoring %s because action '%s' cannot be recurring",
673  ID(operation), name);
674  return;
675  }
676 
677  key = generate_op_key(rsc->id, name, interval_ms);
678  if (find_rsc_op_entry(rsc, key) == NULL) {
679  crm_trace("Not creating recurring action %s for disabled resource %s",
680  ID(operation), rsc->id);
681  free(key);
682  return;
683  }
684 
685  pe_rsc_trace(rsc, "Creating recurring action %s for %s in role %s on %s",
686  ID(operation), rsc->id, role2text(rsc->next_role), node_uname);
687 
688  if (start != NULL) {
689  pe_rsc_trace(rsc, "Marking %s %s due to %s",
690  key, is_set(start->flags, pe_action_optional) ? "optional" : "mandatory",
691  start->uuid);
692  is_optional = (rsc->cmds->action_flags(start, NULL) & pe_action_optional);
693  } else {
694  pe_rsc_trace(rsc, "Marking %s optional", key);
695  is_optional = TRUE;
696  }
697 
698  /* start a monitor for an already active resource */
699  possible_matches = find_actions_exact(rsc->actions, key, node);
700  if (possible_matches == NULL) {
701  is_optional = FALSE;
702  pe_rsc_trace(rsc, "Marking %s mandatory: not active", key);
703 
704  } else {
705  GListPtr gIter = NULL;
706 
707  for (gIter = possible_matches; gIter != NULL; gIter = gIter->next) {
708  action_t *op = (action_t *) gIter->data;
709 
710  if (is_set(op->flags, pe_action_reschedule)) {
711  is_optional = FALSE;
712  break;
713  }
714  }
715  g_list_free(possible_matches);
716  }
717 
718  if ((rsc->next_role == RSC_ROLE_MASTER && role == NULL)
719  || (role != NULL && text2role(role) != rsc->next_role)) {
720  int log_level = LOG_TRACE;
721  const char *result = "Ignoring";
722 
723  if (is_optional) {
724  char *after_key = NULL;
725  action_t *cancel_op = NULL;
726 
727  // It's running, so cancel it
728  log_level = LOG_INFO;
729  result = "Cancelling";
730  cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set);
731 
732  switch (rsc->role) {
733  case RSC_ROLE_SLAVE:
734  case RSC_ROLE_STARTED:
735  if (rsc->next_role == RSC_ROLE_MASTER) {
736  after_key = promote_key(rsc);
737 
738  } else if (rsc->next_role == RSC_ROLE_STOPPED) {
739  after_key = stop_key(rsc);
740  }
741 
742  break;
743  case RSC_ROLE_MASTER:
744  after_key = demote_key(rsc);
745  break;
746  default:
747  break;
748  }
749 
750  if (after_key) {
751  custom_action_order(rsc, NULL, cancel_op, rsc, after_key, NULL,
752  pe_order_runnable_left, data_set);
753  }
754  }
755 
756  do_crm_log(log_level, "%s action %s (%s vs. %s)",
757  result, key, role ? role : role2text(RSC_ROLE_SLAVE),
758  role2text(rsc->next_role));
759 
760  free(key);
761  return;
762  }
763 
764  mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set);
765  key = mon->uuid;
766  if (is_optional) {
767  pe_rsc_trace(rsc, "%s\t %s (optional)", node_uname, mon->uuid);
768  }
769 
770  if (start == NULL || is_set(start->flags, pe_action_runnable) == FALSE) {
771  pe_rsc_debug(rsc, "%s\t %s (cancelled : start un-runnable)",
772  node_uname, mon->uuid);
773  update_action_flags(mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
774 
775  } else if (node == NULL || node->details->online == FALSE || node->details->unclean) {
776  pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
777  node_uname, mon->uuid);
778  update_action_flags(mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
779 
780  } else if (is_set(mon->flags, pe_action_optional) == FALSE) {
781  pe_rsc_info(rsc, " Start recurring %s (%us) for %s on %s",
782  mon->task, interval_ms / 1000, rsc->id, node_uname);
783  }
784 
785  if (rsc->next_role == RSC_ROLE_MASTER) {
786  char *running_master = crm_itoa(PCMK_OCF_RUNNING_MASTER);
787 
788  add_hash_param(mon->meta, XML_ATTR_TE_TARGET_RC, running_master);
789  free(running_master);
790  }
791 
792  if (node == NULL || is_set(rsc->flags, pe_rsc_managed)) {
793  custom_action_order(rsc, start_key(rsc), NULL,
794  NULL, strdup(key), mon,
795  pe_order_implies_then | pe_order_runnable_left, data_set);
796 
797  custom_action_order(rsc, reload_key(rsc), NULL,
798  NULL, strdup(key), mon,
799  pe_order_implies_then | pe_order_runnable_left, data_set);
800 
801  if (rsc->next_role == RSC_ROLE_MASTER) {
802  custom_action_order(rsc, promote_key(rsc), NULL,
803  rsc, NULL, mon,
804  pe_order_optional | pe_order_runnable_left, data_set);
805 
806  } else if (rsc->role == RSC_ROLE_MASTER) {
807  custom_action_order(rsc, demote_key(rsc), NULL,
808  rsc, NULL, mon,
809  pe_order_optional | pe_order_runnable_left, data_set);
810  }
811  }
812 }
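
In CIB terms, RecurringOp() is what makes per-role monitors work on promotable resources: each role gets its own `<op>`, and the monitor that no longer matches next_role is cancelled and ordered before the promote/demote/stop that changes the role. A typical fragment (ids and intervals illustrative; the intervals must differ, per is_op_dup() above):

    <operations>
      <op id="db-monitor-master" name="monitor" interval="10s" role="Master"/>
      <op id="db-monitor-slave"  name="monitor" interval="11s" role="Slave"/>
    </operations>

For the Master monitor, the expected result is set to PCMK_OCF_RUNNING_MASTER (8), so an agent that reports plain "running" while the resource should be promoted is treated as a failure.
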
813 
814 static void
815 Recurring(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set)
816 {
817  if (is_not_set(rsc->flags, pe_rsc_maintenance) &&
818  (node == NULL || node->details->maintenance == FALSE)) {
819  xmlNode *operation = NULL;
820 
821  for (operation = __xml_first_child_element(rsc->ops_xml);
822  operation != NULL;
823  operation = __xml_next_element(operation)) {
824 
825  if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
826  RecurringOp(rsc, start, node, operation, data_set);
827  }
828  }
829  }
830 }
831 
832 static void
833 RecurringOp_Stopped(resource_t * rsc, action_t * start, node_t * node,
834  xmlNode * operation, pe_working_set_t * data_set)
835 {
836  char *key = NULL;
837  const char *name = NULL;
838  const char *role = NULL;
839  const char *interval_spec = NULL;
840  const char *node_uname = node? node->details->uname : "n/a";
841 
842  guint interval_ms = 0;
843  GListPtr possible_matches = NULL;
844  GListPtr gIter = NULL;
845 
846  /* Only process operations that have role="Stopped" */
847  role = crm_element_value(operation, "role");
848  if (role == NULL || text2role(role) != RSC_ROLE_STOPPED) {
849  return;
850  }
851 
852  interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
853  interval_ms = crm_parse_interval_spec(interval_spec);
854  if (interval_ms == 0) {
855  return;
856  }
857 
858  name = crm_element_value(operation, "name");
859  if (is_op_dup(rsc, name, interval_ms)) {
860  crm_trace("Not creating duplicate recurring action %s for %dms %s",
861  ID(operation), interval_ms, name);
862  return;
863  }
864 
865  if (op_cannot_recur(name)) {
866  crm_config_err("Invalid recurring action %s with name: '%s'", ID(operation), name);
867  return;
868  }
869 
870  key = generate_op_key(rsc->id, name, interval_ms);
871  if (find_rsc_op_entry(rsc, key) == NULL) {
872  crm_trace("Not creating recurring action %s for disabled resource %s",
873  ID(operation), rsc->id);
874  free(key);
875  return;
876  }
877 
878  // @TODO add support
879  if (is_set(rsc->flags, pe_rsc_unique) == FALSE) {
880  crm_notice("Ignoring %s (recurring monitors for Stopped role are "
881  "not supported for anonymous clones)",
882  ID(operation));
883  return;
884  }
885 
886  pe_rsc_trace(rsc,
887  "Creating recurring action %s for %s in role %s on nodes where it should not be running",
888  ID(operation), rsc->id, role2text(rsc->next_role));
889 
890  /* if the monitor exists on the node where the resource will be running, cancel it */
891  if (node != NULL) {
892  possible_matches = find_actions_exact(rsc->actions, key, node);
893  if (possible_matches) {
894  action_t *cancel_op = NULL;
895 
896  g_list_free(possible_matches);
897 
898  cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set);
899 
900  if (rsc->next_role == RSC_ROLE_STARTED || rsc->next_role == RSC_ROLE_SLAVE) {
901  /* rsc->role == RSC_ROLE_STOPPED: cancel the monitor before start */
902  /* rsc->role == RSC_ROLE_STARTED: for a migration, cancel the monitor on the target node before start */
903  custom_action_order(rsc, NULL, cancel_op, rsc, start_key(rsc), NULL,
904  pe_order_runnable_left, data_set);
905  }
906 
907  pe_rsc_info(rsc, "Cancel action %s (%s vs. %s) on %s",
908  key, role, role2text(rsc->next_role), node_uname);
909  }
910  }
911 
912  for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
913  node_t *stop_node = (node_t *) gIter->data;
914  const char *stop_node_uname = stop_node->details->uname;
915  gboolean is_optional = TRUE;
916  gboolean probe_is_optional = TRUE;
917  gboolean stop_is_optional = TRUE;
918  action_t *stopped_mon = NULL;
919  char *rc_inactive = NULL;
920  GListPtr probe_complete_ops = NULL;
921  GListPtr stop_ops = NULL;
922  GListPtr local_gIter = NULL;
923 
924  if (node && safe_str_eq(stop_node_uname, node_uname)) {
925  continue;
926  }
927 
928  pe_rsc_trace(rsc, "Creating recurring action %s for %s on %s",
929  ID(operation), rsc->id, crm_str(stop_node_uname));
930 
931  /* start a monitor for an already stopped resource */
932  possible_matches = find_actions_exact(rsc->actions, key, stop_node);
933  if (possible_matches == NULL) {
934  pe_rsc_trace(rsc, "Marking %s mandatory on %s: not active", key,
935  crm_str(stop_node_uname));
936  is_optional = FALSE;
937  } else {
938  pe_rsc_trace(rsc, "Marking %s optional on %s: already active", key,
939  crm_str(stop_node_uname));
940  is_optional = TRUE;
941  g_list_free(possible_matches);
942  }
943 
944  stopped_mon = custom_action(rsc, strdup(key), name, stop_node, is_optional, TRUE, data_set);
945 
946  rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING);
947  add_hash_param(stopped_mon->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
948  free(rc_inactive);
949 
950  if (is_set(rsc->flags, pe_rsc_managed)) {
951  GList *probes = pe__resource_actions(rsc, stop_node, RSC_STATUS,
952  FALSE);
953  GListPtr pIter = NULL;
954 
955  for (pIter = probes; pIter != NULL; pIter = pIter->next) {
956  action_t *probe = (action_t *) pIter->data;
957 
958  order_actions(probe, stopped_mon, pe_order_runnable_left);
959  crm_trace("%s then %s on %s", probe->uuid, stopped_mon->uuid, stop_node->details->uname);
960  }
961 
962  g_list_free(probes);
963  }
964 
965  if (probe_complete_ops) {
966  g_list_free(probe_complete_ops);
967  }
968 
969  stop_ops = pe__resource_actions(rsc, stop_node, RSC_STOP, TRUE);
970 
971  for (local_gIter = stop_ops; local_gIter != NULL; local_gIter = local_gIter->next) {
972  action_t *stop = (action_t *) local_gIter->data;
973 
974  if (is_set(stop->flags, pe_action_optional) == FALSE) {
975  stop_is_optional = FALSE;
976  }
977 
978  if (is_set(stop->flags, pe_action_runnable) == FALSE) {
979  crm_debug("%s\t %s (cancelled : stop un-runnable)",
980  crm_str(stop_node_uname), stopped_mon->uuid);
981  update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
982  }
983 
984  if (is_set(rsc->flags, pe_rsc_managed)) {
985  custom_action_order(rsc, stop_key(rsc), stop,
986  NULL, strdup(key), stopped_mon,
987  pe_order_implies_then | pe_order_runnable_left, data_set);
988  }
989 
990  }
991 
992  if (stop_ops) {
993  g_list_free(stop_ops);
994  }
995 
996  if (is_optional == FALSE && probe_is_optional && stop_is_optional
997  && is_set(rsc->flags, pe_rsc_managed) == FALSE) {
998  pe_rsc_trace(rsc, "Marking %s optional on %s due to unmanaged",
999  key, crm_str(stop_node_uname));
1000  update_action_flags(stopped_mon, pe_action_optional, __FUNCTION__, __LINE__);
1001  }
1002 
1003  if (is_set(stopped_mon->flags, pe_action_optional)) {
1004  pe_rsc_trace(rsc, "%s\t %s (optional)", crm_str(stop_node_uname), stopped_mon->uuid);
1005  }
1006 
1007  if (stop_node->details->online == FALSE || stop_node->details->unclean) {
1008  pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
1009  crm_str(stop_node_uname), stopped_mon->uuid);
1010  update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
1011  }
1012 
1013  if (is_set(stopped_mon->flags, pe_action_runnable)
1014  && is_set(stopped_mon->flags, pe_action_optional) == FALSE) {
1015  crm_notice(" Start recurring %s (%us) for %s on %s", stopped_mon->task,
1016  interval_ms / 1000, rsc->id, crm_str(stop_node_uname));
1017  }
1018  }
1019 
1020  free(key);
1021 }
1022 
1023 static void
1024 Recurring_Stopped(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set)
1025 {
1026  if (is_not_set(rsc->flags, pe_rsc_maintenance) &&
1027  (node == NULL || node->details->maintenance == FALSE)) {
1028  xmlNode *operation = NULL;
1029 
1030  for (operation = __xml_first_child_element(rsc->ops_xml);
1031  operation != NULL;
1032  operation = __xml_next_element(operation)) {
1033 
1034  if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
1035  RecurringOp_Stopped(rsc, start, node, operation, data_set);
1036  }
1037  }
1038  }
1039 }
1040 
1041 static void
1042 handle_migration_actions(resource_t * rsc, node_t *current, node_t *chosen, pe_working_set_t * data_set)
1043 {
1044  action_t *migrate_to = NULL;
1045  action_t *migrate_from = NULL;
1046  action_t *start = NULL;
1047  action_t *stop = NULL;
1048  gboolean partial = rsc->partial_migration_target ? TRUE : FALSE;
1049 
1050  pe_rsc_trace(rsc, "Processing migration actions %s moving from %s to %s . partial migration = %s",
1051  rsc->id, current->details->id, chosen->details->id, partial ? "TRUE" : "FALSE");
1052  start = start_action(rsc, chosen, TRUE);
1053  stop = stop_action(rsc, current, TRUE);
1054 
1055  if (partial == FALSE) {
1056  migrate_to = custom_action(rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), RSC_MIGRATE, current, TRUE, TRUE, data_set);
1057  }
1058 
1059  migrate_from = custom_action(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), RSC_MIGRATED, chosen, TRUE, TRUE, data_set);
1060 
1061  if ((migrate_to && migrate_from) || (migrate_from && partial)) {
1062 
1063  set_bit(start->flags, pe_action_migrate_runnable);
1064  set_bit(stop->flags, pe_action_migrate_runnable);
1065 
1066  update_action_flags(start, pe_action_pseudo, __FUNCTION__, __LINE__); /* easier than trying to delete it from the graph */
1067 
1068  /* order probes before migrations */
1069  if (partial) {
1070  set_bit(migrate_from->flags, pe_action_migrate_runnable);
1071  migrate_from->needs = start->needs;
1072 
1073  custom_action_order(rsc, generate_op_key(rsc->id, RSC_STATUS, 0), NULL,
1074  rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL, pe_order_optional, data_set);
1075 
1076  } else {
1077  set_bit(migrate_from->flags, pe_action_migrate_runnable);
1078  set_bit(migrate_to->flags, pe_action_migrate_runnable);
1079  migrate_to->needs = start->needs;
1080 
1081  custom_action_order(rsc, generate_op_key(rsc->id, RSC_STATUS, 0), NULL,
1082  rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL, pe_order_optional, data_set);
1083  custom_action_order(rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL,
1084  rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL, pe_order_optional | pe_order_implies_first_migratable, data_set);
1085  }
1086 
1087  custom_action_order(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL,
1088  rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, pe_order_optional | pe_order_implies_first_migratable, data_set);
1089  custom_action_order(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL,
1090  rsc, generate_op_key(rsc->id, RSC_START, 0), NULL, pe_order_optional | pe_order_implies_first_migratable | pe_order_pseudo_left, data_set);
1091 
1092  }
1093 
1094  if (migrate_to) {
1095  add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
1096  add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);
1097 
1098  /* Pacemaker Remote connections don't require pending to be recorded in
1099  * the CIB. We can reduce CIB writes by not setting PENDING for them.
1100  */
1101  if (rsc->is_remote_node == FALSE) {
1102  /* migrate_to takes place on the source node, but can
1103  * have an effect on the target node depending on how
1104  * the agent is written. Because of this, we have to maintain
1105  * a record that the migrate_to occurred, in case the source node
1106  * loses membership while the migrate_to action is still in-flight.
1107  */
1108  add_hash_param(migrate_to->meta, XML_OP_ATTR_PENDING, "true");
1109  }
1110  }
1111 
1112  if (migrate_from) {
1113  add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
1114  add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);
1115  }
1116 }
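
Taken together, the orderings above chain a full (non-partial) migration as:

    probe (target) -> migrate_to (source) -> migrate_from (target) -> stop (source) -> start (pseudo)

The start becomes a pseudo-action so the transition graph still contains a start event without re-running the agent, and the migrate_source/migrate_target meta-attributes let the executor pass the peer node names to the resource agent.
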
1117 
1118 void
1119 native_create_actions(resource_t * rsc, pe_working_set_t * data_set)
1120 {
1121  action_t *start = NULL;
1122  node_t *chosen = NULL;
1123  node_t *current = NULL;
1124  gboolean need_stop = FALSE;
1125  bool need_promote = FALSE;
1126  gboolean is_moving = FALSE;
1127  gboolean allow_migrate = is_set(rsc->flags, pe_rsc_allow_migrate) ? TRUE : FALSE;
1128 
1129  GListPtr gIter = NULL;
1130  unsigned int num_all_active = 0;
1131  unsigned int num_clean_active = 0;
1132  bool multiply_active = FALSE;
1133  enum rsc_role_e role = RSC_ROLE_UNKNOWN;
1134  enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;
1135 
1136  CRM_ASSERT(rsc);
1137  chosen = rsc->allocated_to;
1138  if (chosen != NULL && rsc->next_role == RSC_ROLE_UNKNOWN) {
1139  rsc->next_role = RSC_ROLE_STARTED;
1140  pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role));
1141 
1142  } else if (rsc->next_role == RSC_ROLE_UNKNOWN) {
1143  rsc->next_role = RSC_ROLE_STOPPED;
1144  pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role));
1145  }
1146 
1147  pe_rsc_trace(rsc, "Processing state transition for %s %p: %s->%s", rsc->id, rsc,
1148  role2text(rsc->role), role2text(rsc->next_role));
1149 
1150  current = pe__find_active_on(rsc, &num_all_active, &num_clean_active);
1151 
1152  for (gIter = rsc->dangling_migrations; gIter != NULL; gIter = gIter->next) {
1153  node_t *dangling_source = (node_t *) gIter->data;
1154 
1155  action_t *stop = stop_action(rsc, dangling_source, FALSE);
1156 
1157  set_bit(stop->flags, pe_action_dangle);
1158  pe_rsc_trace(rsc, "Forcing a cleanup of %s on %s",
1159  rsc->id, dangling_source->details->uname);
1160 
1161  if (is_set(data_set->flags, pe_flag_remove_after_stop)) {
1162  DeleteRsc(rsc, dangling_source, FALSE, data_set);
1163  }
1164  }
1165 
1166  if ((num_all_active == 2) && (num_clean_active == 2) && chosen
1167  && rsc->partial_migration_source && rsc->partial_migration_target
1168  && (current->details == rsc->partial_migration_source->details)
1169  && (chosen->details == rsc->partial_migration_target->details)) {
1170 
1171  /* The chosen node is still the migration target from a partial
1172  * migration. Attempt to continue the migration instead of recovering
1173  * by stopping the resource everywhere and starting it on a single node.
1174  */
1175  pe_rsc_trace(rsc,
1176  "Will attempt to continue with a partial migration to target %s from %s",
1177  rsc->partial_migration_target->details->id,
1178  rsc->partial_migration_source->details->id);
1179 
1180  } else if (is_not_set(rsc->flags, pe_rsc_needs_fencing)) {
1181  /* If a resource has "requires" set to nothing or quorum, don't consider
1182  * it active on unclean nodes (similar to how all resources behave when
1183  * stonith-enabled is false). We can start such resources elsewhere
1184  * before fencing completes, and if we considered the resource active on
1185  * the failed node, we would attempt recovery for being active on
1186  * multiple nodes.
1187  */
1188  multiply_active = (num_clean_active > 1);
1189  } else {
1190  multiply_active = (num_all_active > 1);
1191  }
1192 
1193  if (multiply_active) {
1194  if (rsc->partial_migration_target && rsc->partial_migration_source) {
1195  // Migration was in progress, but we've chosen a different target
1196  crm_notice("Resource %s can no longer migrate to %s. Stopping on %s too",
1197  rsc->id, rsc->partial_migration_target->details->uname,
1198  rsc->partial_migration_source->details->uname);
1199 
1200  } else {
1201  // Resource was incorrectly multiply active
1202  pe_proc_err("Resource %s is active on %u nodes (%s)",
1203  rsc->id, num_all_active,
1204  recovery2text(rsc->recovery_type));
1205  crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ#Resource_is_Too_Active for more information");
1206  }
1207 
1208  if (rsc->recovery_type == recovery_stop_start) {
1209  need_stop = TRUE;
1210  }
1211 
1212  /* If by chance a partial migration is in process, but the migration
1213  * target is not chosen still, clear all partial migration data.
1214  */
1215  rsc->partial_migration_source = rsc->partial_migration_target = NULL;
1216  allow_migrate = FALSE;
1217  }
1218 
1219  if (is_set(rsc->flags, pe_rsc_start_pending)) {
1220  start = start_action(rsc, chosen, TRUE);
1221  set_bit(start->flags, pe_action_print_always);
1222  }
1223 
1224  if (current && chosen && current->details != chosen->details) {
1225  pe_rsc_trace(rsc, "Moving %s", rsc->id);
1226  is_moving = TRUE;
1227  need_stop = TRUE;
1228 
1229  } else if (is_set(rsc->flags, pe_rsc_failed)) {
1230  if (is_set(rsc->flags, pe_rsc_stop)) {
1231  need_stop = TRUE;
1232  pe_rsc_trace(rsc, "Recovering %s", rsc->id);
1233  } else {
1234  pe_rsc_trace(rsc, "Recovering %s by demotion", rsc->id);
1235  if (rsc->next_role == RSC_ROLE_MASTER) {
1236  need_promote = TRUE;
1237  }
1238  }
1239 
1240  } else if (is_set(rsc->flags, pe_rsc_block)) {
1241  pe_rsc_trace(rsc, "Block %s", rsc->id);
1242  need_stop = TRUE;
1243 
1244  } else if (rsc->role > RSC_ROLE_STARTED && current != NULL && chosen != NULL) {
1245  /* Recovery of a promoted resource */
1246  start = start_action(rsc, chosen, TRUE);
1247  if (is_set(start->flags, pe_action_optional) == FALSE) {
1248  pe_rsc_trace(rsc, "Forced start %s", rsc->id);
1249  need_stop = TRUE;
1250  }
1251  }
1252 
1253  pe_rsc_trace(rsc, "Creating actions for %s: %s->%s", rsc->id,
1254  role2text(rsc->role), role2text(rsc->next_role));
1255 
1256  /* Create any additional actions required when bringing resource down and
1257  * back up to same level.
1258  */
1259  role = rsc->role;
1260  while (role != RSC_ROLE_STOPPED) {
1261  next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED];
1262  pe_rsc_trace(rsc, "Down: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role),
1263  rsc->id, need_stop ? " required" : "");
1264  if (rsc_action_matrix[role][next_role] (rsc, current, !need_stop, data_set) == FALSE) {
1265  break;
1266  }
1267  role = next_role;
1268  }
1269 
1270 
1271  while (rsc->role <= rsc->next_role && role != rsc->role && is_not_set(rsc->flags, pe_rsc_block)) {
1272  bool required = need_stop;
1273 
1274  next_role = rsc_state_matrix[role][rsc->role];
1275  if ((next_role == RSC_ROLE_MASTER) && need_promote) {
1276  required = true;
1277  }
1278  pe_rsc_trace(rsc, "Up: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role),
1279  rsc->id, (required? " required" : ""));
1280  if (rsc_action_matrix[role][next_role](rsc, chosen, !required,
1281  data_set) == FALSE) {
1282  break;
1283  }
1284  role = next_role;
1285  }
1286  role = rsc->role;
1287 
1288  /* Required steps from this role to the next */
1289  while (role != rsc->next_role) {
1290  next_role = rsc_state_matrix[role][rsc->next_role];
1291  pe_rsc_trace(rsc, "Role: Executing: %s->%s = (%s on %s)", role2text(role), role2text(next_role), rsc->id, chosen?chosen->details->uname:"NA");
1292  if (rsc_action_matrix[role][next_role] (rsc, chosen, FALSE, data_set) == FALSE) {
1293  break;
1294  }
1295  role = next_role;
1296  }
1297 
1298  if(is_set(rsc->flags, pe_rsc_block)) {
1299  pe_rsc_trace(rsc, "No monitor additional ops for blocked resource");
1300 
1301  } else if (rsc->next_role != RSC_ROLE_STOPPED || is_set(rsc->flags, pe_rsc_managed) == FALSE) {
1302  pe_rsc_trace(rsc, "Monitor ops for active resource");
1303  start = start_action(rsc, chosen, TRUE);
1304  Recurring(rsc, start, chosen, data_set);
1305  Recurring_Stopped(rsc, start, chosen, data_set);
1306  } else {
1307  pe_rsc_trace(rsc, "Monitor ops for inactive resource");
1308  Recurring_Stopped(rsc, NULL, NULL, data_set);
1309  }
1310 
1311  /* If we are stuck in a partial migration and the migration target no
1312  * longer matches the chosen node, a full stop/start is required.
1313  */
1314  if (rsc->partial_migration_target && (chosen == NULL || rsc->partial_migration_target->details != chosen->details)) {
1315  pe_rsc_trace(rsc, "Not allowing partial migration to continue. %s", rsc->id);
1316  allow_migrate = FALSE;
1317 
1318  } else if (is_moving == FALSE ||
1319  is_not_set(rsc->flags, pe_rsc_managed) ||
1320  is_set(rsc->flags, pe_rsc_failed) ||
1321  is_set(rsc->flags, pe_rsc_start_pending) ||
1322  (current && current->details->unclean) ||
1323  rsc->next_role < RSC_ROLE_STARTED) {
1324 
1325  allow_migrate = FALSE;
1326  }
1327 
1328  if (allow_migrate) {
1329  handle_migration_actions(rsc, current, chosen, data_set);
1330  }
1331 }
1332 
1333 static void
1334 rsc_avoids_remote_nodes(resource_t *rsc)
1335 {
1336  GHashTableIter iter;
1337  node_t *node = NULL;
1338  g_hash_table_iter_init(&iter, rsc->allowed_nodes);
1339  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
1340  if (node->details->remote_rsc) {
1341  node->weight = -INFINITY;
1342  }
1343  }
1344 }
1345 
1345 
1346 /*!
1347  * \internal
1348  * \brief Return allowed nodes as (possibly sorted) list
1349  *
1350  * Convert a resource's hash table of allowed nodes to a list. If printing to
1351  * stdout, sort the list, to keep action lists in a consistent order for
1352  * regression test output (while avoiding the performance hit on a live
1353  * cluster).
1354  *
1355  * \param[in] rsc       Resource to check for allowed nodes
1356  * \param[in] data_set  Cluster working set
1357  *
1358  * \return List of allowed nodes
1359  */
1360 static GList *
1361 allowed_nodes_as_list(pe_resource_t *rsc, pe_working_set_t *data_set)
1362 {
1363  GList *allowed_nodes = NULL;
1364 
1365  if (rsc->allowed_nodes) {
1366  allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes);
1367  }
1368 
1369  if (is_set(data_set->flags, pe_flag_stdout)) {
1370  allowed_nodes = g_list_sort(allowed_nodes, sort_node_uname);
1371  }
1372  return allowed_nodes;
1373 }
1374 
1375 void
1376 native_internal_constraints(resource_t * rsc, pe_working_set_t * data_set)
1377 {
1378  /* This function is on the critical path and worth optimizing as much as possible */
1379 
1380  pe_resource_t *top = NULL;
1381  GList *allowed_nodes = NULL;
1382  bool check_unfencing = FALSE;
1383  bool check_utilization = FALSE;
1384 
1385  if (is_not_set(rsc->flags, pe_rsc_managed)) {
1386  pe_rsc_trace(rsc,
1387  "Skipping native constraints for unmanaged resource: %s",
1388  rsc->id);
1389  return;
1390  }
1391 
1392  top = uber_parent(rsc);
1393 
1394  // Whether resource requires unfencing
1395  check_unfencing = is_not_set(rsc->flags, pe_rsc_fence_device)
1396  && is_set(data_set->flags, pe_flag_enable_unfencing)
1397  && is_set(rsc->flags, pe_rsc_needs_unfencing);
1398 
1399  // Whether a non-default placement strategy is used
1400  check_utilization = (g_hash_table_size(rsc->utilization) > 0)
1401  && safe_str_neq(data_set->placement_strategy, "default");
1402 
1403  // Order stops before starts (i.e. restart)
1404  custom_action_order(rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL,
1405  rsc, generate_op_key(rsc->id, RSC_START, 0), NULL,
1406  pe_order_optional | pe_order_implies_then | pe_order_restart,
1407  data_set);
1408 
1409  // Promotable ordering: demote before stop, start before promote
1410  if (is_set(top->flags, pe_rsc_promotable) || (rsc->role > RSC_ROLE_SLAVE)) {
1411  custom_action_order(rsc, generate_op_key(rsc->id, RSC_DEMOTE, 0), NULL,
1412  rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL,
1413  pe_order_implies_first_master, data_set);
1414 
1415  custom_action_order(rsc, generate_op_key(rsc->id, RSC_START, 0), NULL,
1416  rsc, generate_op_key(rsc->id, RSC_PROMOTE, 0), NULL,
1417  pe_order_runnable_left, data_set);
1418  }
1419 
1420  // Certain checks need allowed nodes
1421  if (check_unfencing || check_utilization || rsc->container) {
1422  allowed_nodes = allowed_nodes_as_list(rsc, data_set);
1423  }
1424 
1425  if (check_unfencing) {
1426  /* Check if the node needs to be unfenced first */
1427 
1428  for (GList *item = allowed_nodes; item; item = item->next) {
1429  pe_node_t *node = item->data;
1430  pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, data_set);
1431 
1432  crm_debug("Ordering any stops of %s before %s, and any starts after",
1433  rsc->id, unfence->uuid);
1434 
1435  /*
1436  * It would be more efficient to order clone resources once,
1437  * rather than order each instance, but ordering the instance
1438  * allows us to avoid unnecessary dependencies that might conflict
1439  * with user constraints.
1440  *
1441  * @TODO: This constraint can still produce a transition loop if the
1442  * resource has a stop scheduled on the node being unfenced, and
1443  * there is a user ordering constraint to start some other resource
1444  * (which will be ordered after the unfence) before stopping this
1445  * resource. An example is "start some slow-starting cloned service
1446  * before stopping an associated virtual IP that may be moving to
1447  * it":
1448  * stop this -> unfencing -> start that -> stop this
1449  */
1450  custom_action_order(rsc, stop_key(rsc), NULL,
1451  NULL, strdup(unfence->uuid), unfence,
1452  pe_order_optional | pe_order_same_node, data_set);
1453 
1454  custom_action_order(NULL, strdup(unfence->uuid), unfence,
1455  rsc, start_key(rsc), NULL,
1456  pe_order_implies_then_on_node | pe_order_same_node,
1457  data_set);
1458  }
1459  }
1460 
1461  if (check_utilization) {
1462  GListPtr gIter = NULL;
1463 
1464  pe_rsc_trace(rsc, "Creating utilization constraints for %s - strategy: %s",
1465  rsc->id, data_set->placement_strategy);
1466 
1467  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
1468  node_t *current = (node_t *) gIter->data;
1469 
1470  char *load_stopped_task = crm_concat(LOAD_STOPPED, current->details->uname, '_');
1471  action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);
1472 
1473  if (load_stopped->node == NULL) {
1474  load_stopped->node = node_copy(current);
1475  update_action_flags(load_stopped, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
1476  }
1477 
1478  custom_action_order(rsc, stop_key(rsc), NULL,
1479  NULL, load_stopped_task, load_stopped, pe_order_load, data_set);
1480  }
1481 
1482  for (GList *item = allowed_nodes; item; item = item->next) {
1483  pe_node_t *next = item->data;
1484  char *load_stopped_task = crm_concat(LOAD_STOPPED, next->details->uname, '_');
1485  action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);
1486 
1487  if (load_stopped->node == NULL) {
1488  load_stopped->node = node_copy(next);
1489  update_action_flags(load_stopped, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
1490  }
1491 
1492  custom_action_order(NULL, strdup(load_stopped_task), load_stopped,
1493  rsc, start_key(rsc), NULL, pe_order_load, data_set);
1494 
1495  custom_action_order(NULL, strdup(load_stopped_task), load_stopped,
1496  rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL,
1497  pe_order_load, data_set);
1498 
1499  free(load_stopped_task);
1500  }
1501  }
1502 
1503  if (rsc->container) {
1504  resource_t *remote_rsc = NULL;
1505 
1506  if (rsc->is_remote_node) {
1507  // rsc is the implicit remote connection for a guest or bundle node
1508 
1509  /* Do not allow a guest resource to live on a Pacemaker Remote node,
1510  * to avoid nesting remotes. However, allow bundles to run on remote
1511  * nodes.
1512  */
1513  if (is_not_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
1514  rsc_avoids_remote_nodes(rsc->container);
1515  }
1516 
1517  /* If someone cleans up a guest or bundle node's container, we will
1518  * likely schedule a (re-)probe of the container and recovery of the
1519  * connection. Order the connection stop after the container probe,
1520  * so that if we detect the container running, we will trigger a new
1521  * transition and avoid the unnecessary recovery.
1522  */
1523  new_rsc_order(rsc->container, RSC_STATUS, rsc, RSC_STOP,
1524  pe_order_optional, data_set);
1525 
1526  /* A user can specify that a resource must start on a Pacemaker Remote
1527  * node by explicitly configuring it with the container=NODENAME
1528  * meta-attribute. This is of questionable merit, since location
1529  * constraints can accomplish the same thing. But we support it, so here
1530  * we check whether a resource (that is not itself a remote connection)
1531  * has container set to a remote node or guest node resource.
1532  */
1533  } else if (rsc->container->is_remote_node) {
1534  remote_rsc = rsc->container;
1535  } else {
1536  remote_rsc = pe__resource_contains_guest_node(data_set,
1537  rsc->container);
1538  }
1539 
1540  if (remote_rsc) {
1541  /* Force the resource on the Pacemaker Remote node instead of
1542  * colocating the resource with the container resource.
1543  */
1544  for (GList *item = allowed_nodes; item; item = item->next) {
1545  pe_node_t *node = item->data;
1546 
1547  if (node->details->remote_rsc != remote_rsc) {
1548  node->weight = -INFINITY;
1549  }
1550  }
1551 
1552  } else {
1553  /* This resource is either a filler for a container that does NOT
1554  * represent a Pacemaker Remote node, or a Pacemaker Remote
1555  * connection resource for a guest node or bundle.
1556  */
1557  int score;
1558 
1559  crm_trace("Order and colocate %s relative to its container %s",
1560  rsc->id, rsc->container->id);
1561 
1562  custom_action_order(rsc->container, generate_op_key(rsc->container->id, RSC_START, 0), NULL,
1563  rsc, generate_op_key(rsc->id, RSC_START, 0), NULL,
1564  pe_order_implies_then | pe_order_runnable_left, data_set);
1565 
1566  custom_action_order(rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL,
1567  rsc->container, generate_op_key(rsc->container->id, RSC_STOP, 0), NULL,
1568  pe_order_implies_first, data_set);
1569 
1570  if (is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
1571  score = 10000; /* Highly preferred but not essential */
1572  } else {
1573  score = INFINITY; /* Force them to run on the same host */
1574  }
1575  rsc_colocation_new("resource-with-container", NULL, score, rsc,
1576  rsc->container, NULL, NULL, data_set);
1577  }
1578  }
1579 
1580  if (rsc->is_remote_node || is_set(rsc->flags, pe_rsc_fence_device)) {
1581  /* don't allow remote nodes to run stonith devices
1582  * or remote connection resources.*/
1583  rsc_avoids_remote_nodes(rsc);
1584  }
1585  g_list_free(allowed_nodes);
1586 }
1587 
1588 void
1589 native_rsc_colocation_lh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh,
1590  rsc_colocation_t *constraint,
1591  pe_working_set_t *data_set)
1592 {
1593  if (rsc_lh == NULL) {
1594  pe_err("rsc_lh was NULL for %s", constraint->id);
1595  return;
1596 
1597  } else if (constraint->rsc_rh == NULL) {
1598  pe_err("rsc_rh was NULL for %s", constraint->id);
1599  return;
1600  }
1601 
1602  pe_rsc_trace(rsc_lh, "Processing colocation constraint between %s and %s", rsc_lh->id,
1603  rsc_rh->id);
1604 
1605  rsc_rh->cmds->rsc_colocation_rh(rsc_lh, rsc_rh, constraint, data_set);
1606 }
1607 
1608 enum filter_colocation_res
1609 filter_colocation_constraint(resource_t * rsc_lh, resource_t * rsc_rh,
1610  rsc_colocation_t * constraint, gboolean preview)
1611 {
1612  if (constraint->score == 0) {
1613  return influence_nothing;
1614  }
1615 
1616  /* rh side must be allocated before we can process constraint */
1617  if (preview == FALSE && is_set(rsc_rh->flags, pe_rsc_provisional)) {
1618  return influence_nothing;
1619  }
1620 
1621  if ((constraint->role_lh >= RSC_ROLE_SLAVE) &&
1622  rsc_lh->parent && is_set(rsc_lh->parent->flags, pe_rsc_promotable)
1623  && is_not_set(rsc_lh->flags, pe_rsc_provisional)) {
1624 
1625  /* LH and RH resources have already been allocated, place the correct
1626  * priority on LH rsc for the given promotable clone resource role */
1627  return influence_rsc_priority;
1628  }
1629 
1630  if (preview == FALSE && is_not_set(rsc_lh->flags, pe_rsc_provisional)) {
1631  // Log an error if we violated a mandatory colocation constraint
1632  const pe_node_t *rh_node = rsc_rh->allocated_to;
1633 
1634  if (rsc_lh->allocated_to == NULL) {
1635  // Dependent resource isn't allocated, so constraint doesn't matter
1636  return influence_nothing;
1637  }
1638 
1639  if (constraint->score >= INFINITY) {
1640  // Dependent resource must colocate with rh_node
1641 
1642  if ((rh_node == NULL)
1643  || (rh_node->details != rsc_lh->allocated_to->details)) {
1644  crm_err("%s must be colocated with %s but is not (%s vs. %s)",
1645  rsc_lh->id, rsc_rh->id,
1646  rsc_lh->allocated_to->details->uname,
1647  (rh_node? rh_node->details->uname : "unallocated"));
1648  }
1649 
1650  } else if (constraint->score <= -INFINITY) {
1651  // Dependent resource must anti-colocate with rh_node
1652 
1653  if ((rh_node != NULL)
1654  && (rsc_lh->allocated_to->details == rh_node->details)) {
1655  crm_err("%s and %s must be anti-colocated but are allocated "
1656  "to the same node (%s)",
1657  rsc_lh->id, rsc_rh->id, rh_node->details->uname);
1658  }
1659  }
1660  return influence_nothing;
1661  }
1662 
1663  if (constraint->score > 0
1664  && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh != rsc_lh->next_role) {
1665  crm_trace("LH: Skipping constraint: \"%s\" state filter nextrole is %s",
1666  role2text(constraint->role_lh), role2text(rsc_lh->next_role));
1667  return influence_nothing;
1668  }
1669 
1670  if (constraint->score > 0
1671  && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh != rsc_rh->next_role) {
1672  crm_trace("RH: Skipping constraint: \"%s\" state filter", role2text(constraint->role_rh));
1673  return influence_nothing;
1674  }
1675 
1676  if (constraint->score < 0
1677  && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh == rsc_lh->next_role) {
1678  crm_trace("LH: Skipping negative constraint: \"%s\" state filter",
1679  role2text(constraint->role_lh));
1680  return influence_nothing;
1681  }
1682 
1683  if (constraint->score < 0
1684  && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh == rsc_rh->next_role) {
1685  crm_trace("RH: Skipping negative constraint: \"%s\" state filter",
1686  role2text(constraint->role_rh));
1687  return influence_nothing;
1688  }
1689 
1690  return influence_rsc_location;
1691 }
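
Concretely, the filter above reduces every constraint to one of three outcomes. A worked example with illustrative resource and node names (rscA depends on rscB):

/*
 * score=INFINITY, both sides allocated, rscA on node1 but rscB on node2
 *   -> crm_err("rscA must be colocated with rscB but is not (node1 vs. node2)")
 *      and influence_nothing: it is too late to move anything this run.
 *
 * score=5000, preview == FALSE, rscB still pe_rsc_provisional
 *   -> influence_nothing: the RH side must be placed first.
 *
 * score=5000, rscA not yet allocated
 *   -> influence_rsc_location: colocation_match() below folds the score
 *      into rscA's allowed-node weights.
 *
 * role_lh is Slave/Master and rscA is an already-placed instance of a
 * promotable clone
 *   -> influence_rsc_priority: only the promotion priority is adjusted.
 */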
1692 
1693 static void
1694 influence_priority(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint)
1695 {
1696  const char *rh_value = NULL;
1697  const char *lh_value = NULL;
1698  const char *attribute = CRM_ATTR_ID;
1699  int score_multiplier = 1;
1700 
1701  if (constraint->node_attribute != NULL) {
1702  attribute = constraint->node_attribute;
1703  }
1704 
1705  if (!rsc_rh->allocated_to || !rsc_lh->allocated_to) {
1706  return;
1707  }
1708 
1709  lh_value = pe_node_attribute_raw(rsc_lh->allocated_to, attribute);
1710  rh_value = pe_node_attribute_raw(rsc_rh->allocated_to, attribute);
1711 
1712  if (!safe_str_eq(lh_value, rh_value)) {
1713  if(constraint->score == INFINITY && constraint->role_lh == RSC_ROLE_MASTER) {
1714  rsc_lh->priority = -INFINITY;
1715  }
1716  return;
1717  }
1718 
1719  if (constraint->role_rh && (constraint->role_rh != rsc_rh->next_role)) {
1720  return;
1721  }
1722 
1723  if (constraint->role_lh == RSC_ROLE_SLAVE) {
1724  score_multiplier = -1;
1725  }
1726 
1727  rsc_lh->priority = merge_weights(score_multiplier * constraint->score, rsc_lh->priority);
1728 }
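
The merge_weights() call above relies on pacemaker's saturating score arithmetic. A rough sketch of the rules involved (values illustrative, clamping details simplified):

/*
 * merge_weights(5000, 200)        ~> 5200
 * merge_weights(INFINITY, -300)   ~> INFINITY   (clamped at +/-1000000)
 * merge_weights(-INFINITY, 5000)  ~> -INFINITY  (minus-infinity dominates)
 *
 * With role_lh == RSC_ROLE_SLAVE the multiplier is -1, so a "Slave with X"
 * colocation subtracts the score from the dependent's promotion priority
 * whenever it shares the node attribute with the RH resource, steering
 * promotion toward instances placed elsewhere.
 */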
1729 
1730 static void
1731 colocation_match(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint)
1732 {
1733  const char *tmp = NULL;
1734  const char *value = NULL;
1735  const char *attribute = CRM_ATTR_ID;
1736 
1737  GHashTable *work = NULL;
1738  gboolean do_check = FALSE;
1739 
1740  GHashTableIter iter;
1741  node_t *node = NULL;
1742 
1743  if (constraint->node_attribute != NULL) {
1744  attribute = constraint->node_attribute;
1745  }
1746 
1747  if (rsc_rh->allocated_to) {
1748  value = pe_node_attribute_raw(rsc_rh->allocated_to, attribute);
1749  do_check = TRUE;
1750 
1751  } else if (constraint->score < 0) {
1752  /* nothing to do:
1753  * anti-colocation with something that is not running
1754  */
1755  return;
1756  }
1757 
1758  work = node_hash_dup(rsc_lh->allowed_nodes);
1759 
1760  g_hash_table_iter_init(&iter, work);
1761  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
1762  tmp = pe_node_attribute_raw(node, attribute);
1763  if (do_check && safe_str_eq(tmp, value)) {
1764  if (constraint->score < INFINITY) {
1765  pe_rsc_trace(rsc_lh, "%s: %s.%s += %d", constraint->id, rsc_lh->id,
1766  node->details->uname, constraint->score);
1767  node->weight = merge_weights(constraint->score, node->weight);
1768  }
1769 
1770  } else if (do_check == FALSE || constraint->score >= INFINITY) {
1771  pe_rsc_trace(rsc_lh, "%s: %s.%s -= %d (%s)", constraint->id, rsc_lh->id,
1772  node->details->uname, constraint->score,
1773  do_check ? "failed" : "unallocated");
1774  node->weight = merge_weights(-constraint->score, node->weight);
1775  }
1776  }
1777 
1778  if (can_run_any(work)
1779  || constraint->score <= -INFINITY || constraint->score >= INFINITY) {
1780  g_hash_table_destroy(rsc_lh->allowed_nodes);
1781  rsc_lh->allowed_nodes = work;
1782  work = NULL;
1783 
1784  } else {
1785  static char score[33];
1786 
1787  score2char_stack(constraint->score, score, sizeof(score));
1788 
1789  pe_rsc_info(rsc_lh, "%s: Rolling back scores from %s (%d, %s)",
1790  rsc_lh->id, rsc_rh->id, do_check, score);
1791  }
1792 
1793  if (work) {
1794  g_hash_table_destroy(work);
1795  }
1796 }
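
colocation_match() mutates a copy of the allowed-node table and commits it only if the resource can still run somewhere (or the constraint is mandatory, in which case the damage is intentional). A minimal standalone sketch of that commit-or-rollback pattern, assuming the RH resource is already placed, with plain arrays instead of GHashTables; names, sizes, and weights are illustrative:

#include <stdio.h>
#include <string.h>

#define SK_NODES    3
#define SK_INFINITY 1000000

static int sk_can_run_any(const int w[SK_NODES])
{
    for (int i = 0; i < SK_NODES; i++) {
        if (w[i] >= 0) {
            return 1;
        }
    }
    return 0;
}

/* match[i] is nonzero where the node attribute equals the RH node's value */
static void sk_colocation_match(int allowed[SK_NODES], int score,
                                const int match[SK_NODES])
{
    int work[SK_NODES];

    memcpy(work, allowed, sizeof(work));       /* node_hash_dup() */
    for (int i = 0; i < SK_NODES; i++) {
        if (match[i] && (score < SK_INFINITY)) {
            work[i] += score;                  /* reward matching nodes */
        } else if (!match[i] && (score >= SK_INFINITY)) {
            work[i] -= score;                  /* mandatory: ban the rest */
        }
    }
    if (sk_can_run_any(work)
        || (score <= -SK_INFINITY) || (score >= SK_INFINITY)) {
        memcpy(allowed, work, sizeof(work));   /* commit */
    }                                          /* else: roll back (discard) */
}

int main(void)
{
    int allowed[SK_NODES] = { 0, 0, 0 };
    int match[SK_NODES] = { 1, 0, 0 };         /* only node0 matches */

    sk_colocation_match(allowed, SK_INFINITY, match);
    printf("%d %d %d\n", allowed[0], allowed[1], allowed[2]);
    /* prints: 0 -1000000 -1000000 -- node0 untouched, the rest banned */
    return 0;
}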
1797 
1798 void
1799 native_rsc_colocation_rh(resource_t *rsc_lh, resource_t *rsc_rh,
1800  rsc_colocation_t *constraint,
1801  pe_working_set_t *data_set)
1802 {
1803  enum filter_colocation_res filter_results;
1804 
1805  CRM_ASSERT(rsc_lh);
1806  CRM_ASSERT(rsc_rh);
1807  filter_results = filter_colocation_constraint(rsc_lh, rsc_rh, constraint, FALSE);
1808  pe_rsc_trace(rsc_lh, "%sColocating %s with %s (%s, weight=%d, filter=%d)",
1809  constraint->score >= 0 ? "" : "Anti-",
1810  rsc_lh->id, rsc_rh->id, constraint->id, constraint->score, filter_results);
1811 
1812  switch (filter_results) {
1813  case influence_rsc_priority:
1814  influence_priority(rsc_lh, rsc_rh, constraint);
1815  break;
1816  case influence_rsc_location:
1817  pe_rsc_trace(rsc_lh, "%sColocating %s with %s (%s, weight=%d)",
1818  constraint->score >= 0 ? "" : "Anti-",
1819  rsc_lh->id, rsc_rh->id, constraint->id, constraint->score);
1820  colocation_match(rsc_lh, rsc_rh, constraint);
1821  break;
1822  case influence_nothing:
1823  default:
1824  return;
1825  }
1826 }
1827 
1828 static gboolean
1829 filter_rsc_ticket(resource_t * rsc_lh, rsc_ticket_t * rsc_ticket)
1830 {
1831  if (rsc_ticket->role_lh != RSC_ROLE_UNKNOWN && rsc_ticket->role_lh != rsc_lh->role) {
1832  pe_rsc_trace(rsc_lh, "LH: Skipping constraint: \"%s\" state filter",
1833  role2text(rsc_ticket->role_lh));
1834  return FALSE;
1835  }
1836 
1837  return TRUE;
1838 }
1839 
1840 void
1841 rsc_ticket_constraint(resource_t * rsc_lh, rsc_ticket_t * rsc_ticket, pe_working_set_t * data_set)
1842 {
1843  if (rsc_ticket == NULL) {
1844  pe_err("rsc_ticket was NULL");
1845  return;
1846  }
1847 
1848  if (rsc_lh == NULL) {
1849  pe_err("rsc_lh was NULL for %s", rsc_ticket->id);
1850  return;
1851  }
1852 
1853  if (rsc_ticket->ticket->granted && rsc_ticket->ticket->standby == FALSE) {
1854  return;
1855  }
1856 
1857  if (rsc_lh->children) {
1858  GListPtr gIter = rsc_lh->children;
1859 
1860  pe_rsc_trace(rsc_lh, "Processing ticket dependencies from %s", rsc_lh->id);
1861 
1862  for (; gIter != NULL; gIter = gIter->next) {
1863  resource_t *child_rsc = (resource_t *) gIter->data;
1864 
1865  rsc_ticket_constraint(child_rsc, rsc_ticket, data_set);
1866  }
1867  return;
1868  }
1869 
1870  pe_rsc_trace(rsc_lh, "%s: Processing ticket dependency on %s (%s, %s)",
1871  rsc_lh->id, rsc_ticket->ticket->id, rsc_ticket->id,
1872  role2text(rsc_ticket->role_lh));
1873 
1874  if ((rsc_ticket->ticket->granted == FALSE)
1875  && (rsc_lh->running_on != NULL)) {
1876 
1877  GListPtr gIter = NULL;
1878 
1879  switch (rsc_ticket->loss_policy) {
1880  case loss_ticket_stop:
1881  resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
1882  break;
1883 
1884  case loss_ticket_demote:
1885  // Promotion score will be set to -INFINITY in promotion_order()
1886  if (rsc_ticket->role_lh != RSC_ROLE_MASTER) {
1887  resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
1888  }
1889  break;
1890 
1891  case loss_ticket_fence:
1892  if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) {
1893  return;
1894  }
1895 
1896  resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
1897 
1898  for (gIter = rsc_lh->running_on; gIter != NULL; gIter = gIter->next) {
1899  node_t *node = (node_t *) gIter->data;
1900 
1901  pe_fence_node(data_set, node, "deadman ticket was lost");
1902  }
1903  break;
1904 
1905  case loss_ticket_freeze:
1906  if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) {
1907  return;
1908  }
1909  if (rsc_lh->running_on != NULL) {
1910  clear_bit(rsc_lh->flags, pe_rsc_managed);
1911  set_bit(rsc_lh->flags, pe_rsc_block);
1912  }
1913  break;
1914  }
1915 
1916  } else if (rsc_ticket->ticket->granted == FALSE) {
1917 
1918  if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) {
1919  resource_location(rsc_lh, NULL, -INFINITY, "__no_ticket__", data_set);
1920  }
1921 
1922  } else if (rsc_ticket->ticket->standby) {
1923 
1924  if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) {
1925  resource_location(rsc_lh, NULL, -INFINITY, "__ticket_standby__", data_set);
1926  }
1927  }
1928 }
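
A summary of the loss-policy outcomes above for a ticket dependency whose resource is still running when the ticket is revoked (phrasing is mine; behavior is taken from the code):

/*
 * loss-policy=stop    -> resource_location(-INFINITY, "__loss_of_ticket__"):
 *                        the resource is stopped everywhere
 * loss-policy=demote  -> role=Master dependencies only lose their promotion
 *                        score (handled in promotion_order()); any other
 *                        role gets the -INFINITY stop treatment
 * loss-policy=fence   -> -INFINITY plus pe_fence_node() for every node the
 *                        resource is still running on (a "deadman" ticket)
 * loss-policy=freeze  -> the resource stays where it is, unmanaged
 *                        (pe_rsc_managed cleared) and blocked (pe_rsc_block)
 *
 * A granted, non-standby ticket returns early: nothing to enforce.
 */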
1929 
1930 enum pe_action_flags
1931 native_action_flags(action_t * action, node_t * node)
1932 {
1933  return action->flags;
1934 }
1935 
1936 static inline bool
1937 is_primitive_action(pe_action_t *action)
1938 {
1939  return action && action->rsc && (action->rsc->variant == pe_native);
1940 }
1941 
1953 static void
1954 handle_restart_ordering(pe_action_t *first, pe_action_t *then,
1955  enum pe_action_flags filter)
1956 {
1957  const char *reason = NULL;
1958 
1959  CRM_ASSERT(is_primitive_action(first));
1960  CRM_ASSERT(is_primitive_action(then));
1961 
1962  // We need to update the action in two cases:
1963 
1964  // ... if 'then' is required
1965  if (is_set(filter, pe_action_optional)
1966  && is_not_set(then->flags, pe_action_optional)) {
1967  reason = "restart";
1968  }
1969 
1970  /* ... if 'then' is unrunnable start of managed resource (if a resource
1971  * should restart but can't start, we still want to stop)
1972  */
1973  if (is_set(filter, pe_action_runnable)
1974  && is_not_set(then->flags, pe_action_runnable)
1975  && is_set(then->rsc->flags, pe_rsc_managed)
1976  && safe_str_eq(then->task, RSC_START)) {
1977  reason = "stop";
1978  }
1979 
1980  if (reason == NULL) {
1981  return;
1982  }
1983 
1984  pe_rsc_trace(first->rsc, "Handling %s -> %s for %s",
1985  first->uuid, then->uuid, reason);
1986 
1987  // Make 'first' required if it is runnable
1988  if (is_set(first->flags, pe_action_runnable)) {
1989  pe_action_implies(first, then, pe_action_optional);
1990  }
1991 
1992  // Make 'first' required if 'then' is required
1993  if (is_not_set(then->flags, pe_action_optional)) {
1994  pe_action_implies(first, then, pe_action_optional);
1995  }
1996 
1997  // Make 'first' unmigratable if 'then' is unmigratable
1998  if (is_not_set(then->flags, pe_action_migrate_runnable)) {
1999  pe_action_implies(first, then, pe_action_migrate_runnable);
2000  }
2001 
2002  // Make 'then' unrunnable if 'first' is required but unrunnable
2003  if (is_not_set(first->flags, pe_action_optional)
2004  && is_not_set(first->flags, pe_action_runnable)) {
2005  pe_action_implies(then, first, pe_action_runnable);
2006  }
2007 }
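
A worked example of the restart handling above, for an ordering "rscA_stop then rscA_start" created with pe_order_restart (names illustrative):

/*
 * 1. rscA must restart, so its start is required: reason = "restart", and
 *    the stop is promoted from optional to required -- a restart may never
 *    silently degrade into "start only".
 * 2. rscA should restart but its start is unrunnable (and the resource is
 *    managed): reason = "stop", and the stop still goes ahead, leaving the
 *    resource cleanly down rather than half-restarted.
 * 3. If the now-required stop is itself unrunnable, the start is made
 *    unrunnable too, so the graph can never start an unstopped resource.
 */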
2008 
2009 enum pe_graph_flags
2010 native_update_actions(pe_action_t *first, pe_action_t *then, pe_node_t *node,
2011  enum pe_action_flags flags, enum pe_action_flags filter,
2012  enum pe_ordering type, pe_working_set_t *data_set)
2013 {
2014  /* flags == get_action_flags(first, then_node) called from update_action() */
2015  enum pe_graph_flags changed = pe_graph_none;
2016  enum pe_action_flags then_flags = then->flags;
2017  enum pe_action_flags first_flags = first->flags;
2018 
2019  crm_trace( "Testing %s on %s (0x%.6x) with %s 0x%.6x",
2020  first->uuid, first->node ? first->node->details->uname : "[none]",
2021  first->flags, then->uuid, then->flags);
2022 
2023  if (type & pe_order_asymmetrical) {
2024  resource_t *then_rsc = then->rsc;
2025  enum rsc_role_e then_rsc_role = then_rsc ? then_rsc->fns->state(then_rsc, TRUE) : 0;
2026 
2027  if (!then_rsc) {
2028  /* ignore */
2029  } else if ((then_rsc_role == RSC_ROLE_STOPPED) && safe_str_eq(then->task, RSC_STOP)) {
2030  /* ignore... if 'then' is supposed to be stopped after 'first', but
2031  * then is already stopped, there is nothing to be done when non-symmetrical. */
2032  } else if ((then_rsc_role >= RSC_ROLE_STARTED)
2033  && safe_str_eq(then->task, RSC_START)
2034  && is_set(then->flags, pe_action_optional)
2035  && then->node
2036  && g_list_length(then_rsc->running_on) == 1
2037  && then->node->details == ((node_t *) then_rsc->running_on->data)->details) {
2038  /* Ignore. If 'then' is supposed to be started after 'first', but
2039  * 'then' is already started, there is nothing to be done when
2040  * asymmetrical -- unless the start is mandatory, which indicates
2041  * the resource is restarting, and the ordering is still needed.
2042  */
2043  } else if (!(first->flags & pe_action_runnable)) {
2044  /* prevent 'then' action from happening if 'first' is not runnable and
2045  * 'then' has not yet occurred. */
2046  pe_action_implies(then, first, pe_action_optional);
2047  pe_action_implies(then, first, pe_action_runnable);
2048 
2049  pe_rsc_trace(then->rsc, "Unset optional and runnable on %s", then->uuid);
2050  } else {
2051  /* ignore... then is allowed to start/stop if it wants to. */
2052  }
2053  }
2054 
2055  if (type & pe_order_implies_first) {
2056  if (is_set(filter, pe_action_optional) && is_not_set(flags /* Should be then_flags? */, pe_action_optional)) {
2057  // Needs is_set(first_flags, pe_action_optional) too?
2058  pe_rsc_trace(first->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid);
2059  pe_action_implies(first, then, pe_action_optional);
2060  }
2061 
2062  if (is_set(flags, pe_action_migrate_runnable) &&
2063  is_set(then->flags, pe_action_migrate_runnable) == FALSE &&
2064  is_set(then->flags, pe_action_optional) == FALSE) {
2065 
2066  pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s",
2067  first->uuid, then->uuid);
2068  pe_action_implies(first, then, pe_action_migrate_runnable);
2069  }
2070  }
2071 
2072  if (type & pe_order_implies_first_master) {
2073  if ((filter & pe_action_optional) &&
2074  ((then->flags & pe_action_optional) == FALSE) &&
2075  then->rsc && (then->rsc->role == RSC_ROLE_MASTER)) {
2076  pe_action_implies(first, then, pe_action_optional);
2077 
2078  if (is_set(first->flags, pe_action_migrate_runnable) &&
2079  is_set(then->flags, pe_action_migrate_runnable) == FALSE) {
2080 
2081  pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s", first->uuid, then->uuid);
2082  pe_action_implies(first, then, pe_action_migrate_runnable);
2083  }
2084  pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid);
2085  }
2086  }
2087 
2088  if ((type & pe_order_implies_first_migratable)
2089  && is_set(filter, pe_action_optional)) {
2090 
2091  if (((then->flags & pe_action_migrate_runnable) == FALSE) ||
2092  ((then->flags & pe_action_runnable) == FALSE)) {
2093 
2094  pe_rsc_trace(then->rsc, "Unset runnable on %s because %s is neither runnable nor migratable", first->uuid, then->uuid);
2095  pe_action_implies(first, then, pe_action_runnable);
2096  }
2097 
2098  if ((then->flags & pe_action_optional) == 0) {
2099  pe_rsc_trace(then->rsc, "Unset optional on %s because %s is not optional", first->uuid, then->uuid);
2100  pe_action_implies(first, then, pe_action_optional);
2101  }
2102  }
2103 
2104  if ((type & pe_order_pseudo_left)
2105  && is_set(filter, pe_action_optional)) {
2106 
2107  if ((first->flags & pe_action_runnable) == FALSE) {
2108  pe_action_implies(then, first, pe_action_runnable);
2109  pe_action_implies(then, first, pe_action_pseudo);
2110  pe_rsc_trace(then->rsc, "Unset pseudo on %s because %s is not runnable", then->uuid, first->uuid);
2111  }
2112 
2113  }
2114 
2115  if (is_set(type, pe_order_runnable_left)
2116  && is_set(filter, pe_action_runnable)
2117  && is_set(then->flags, pe_action_runnable)
2118  && is_set(flags, pe_action_runnable) == FALSE) {
2119  pe_rsc_trace(then->rsc, "Unset runnable on %s because of %s", then->uuid, first->uuid);
2120  pe_action_implies(then, first, pe_action_runnable);
2121  pe_action_implies(then, first, pe_action_migrate_runnable);
2122  }
2123 
2124  if (is_set(type, pe_order_implies_then)
2125  && is_set(filter, pe_action_optional)
2126  && is_set(then->flags, pe_action_optional)
2127  && is_set(flags, pe_action_optional) == FALSE) {
2128 
2129  /* in this case, treat migrate_runnable as if first is optional */
2130  if (is_set(first->flags, pe_action_migrate_runnable) == FALSE) {
2131  pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", then->uuid, first->uuid);
2132  pe_action_implies(then, first, pe_action_optional);
2133  }
2134  }
2135 
2136  if (is_set(type, pe_order_restart)) {
2137  handle_restart_ordering(first, then, filter);
2138  }
2139 
2140  if (then_flags != then->flags) {
2141  changed |= pe_graph_updated_then;
2142  pe_rsc_trace(then->rsc,
2143  "Then: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x",
2144  then->uuid, then->node ? then->node->details->uname : "[none]", then->flags,
2145  then_flags, first->uuid, first->flags);
2146 
2147  if(then->rsc && then->rsc->parent) {
2148  /* "X_stop then X_start" doesn't get handled for cloned groups unless we do this */
2149  update_action(then, data_set);
2150  }
2151  }
2152 
2153  if (first_flags != first->flags) {
2154  changed |= pe_graph_updated_first;
2155  pe_rsc_trace(first->rsc,
2156  "First: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x",
2157  first->uuid, first->node ? first->node->details->uname : "[none]",
2158  first->flags, first_flags, then->uuid, then->flags);
2159  }
2160 
2161  return changed;
2162 }
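
The saved then_flags/first_flags comparison at the end is what drives the scheduler's fixpoint iteration over the action graph. One illustrative pass:

/*
 * ordering: rscA_stop ('first') -> rscB_stop ('then'), pe_order_runnable_left
 * input:    rscA_stop unrunnable; rscB_stop currently runnable
 * effect:   pe_action_implies(then, first, pe_action_runnable) clears
 *           runnable on rscB_stop; then->flags now differs from then_flags,
 *           so the function returns pe_graph_updated_then and update_action()
 *           re-walks rscB_stop's own orderings with the new flags.
 */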
2163 
2164 void
2165 native_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
2166 {
2167  GListPtr gIter = NULL;
2168  GHashTableIter iter;
2169  node_t *node = NULL;
2170 
2171  if (constraint == NULL) {
2172  pe_err("Constraint is NULL");
2173  return;
2174 
2175  } else if (rsc == NULL) {
2176  pe_err("LHS of rsc_to_node (%s) is NULL", constraint->id);
2177  return;
2178  }
2179 
2180  pe_rsc_trace(rsc, "Applying %s (%s) to %s", constraint->id,
2181  role2text(constraint->role_filter), rsc->id);
2182 
2183  /* take "lifetime" into account */
2184  if (constraint->role_filter > RSC_ROLE_UNKNOWN && constraint->role_filter != rsc->next_role) {
2185  pe_rsc_debug(rsc, "Constraint (%s) is not active (role : %s vs. %s)",
2186  constraint->id, role2text(constraint->role_filter), role2text(rsc->next_role));
2187  return;
2188  }
2189 
2190  if (constraint->node_list_rh == NULL) {
2191  pe_rsc_trace(rsc, "RHS of constraint %s is NULL", constraint->id);
2192  return;
2193  }
2194 
2195  for (gIter = constraint->node_list_rh; gIter != NULL; gIter = gIter->next) {
2196  node_t *node = (node_t *) gIter->data;
2197  node_t *other_node = NULL;
2198 
2199  other_node = (node_t *) pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
2200 
2201  if (other_node != NULL) {
2202  pe_rsc_trace(rsc, "%s + %s: %d + %d",
2203  node->details->uname,
2204  other_node->details->uname, node->weight, other_node->weight);
2205  other_node->weight = merge_weights(other_node->weight, node->weight);
2206 
2207  } else {
2208  other_node = node_copy(node);
2209 
2210  pe_rsc_trace(rsc, "%s: %d (insert %d)", other_node->details->uname, other_node->weight, constraint->discover_mode);
2211  g_hash_table_insert(rsc->allowed_nodes, (gpointer) other_node->details->id, other_node);
2212  }
2213 
2214  if (other_node->rsc_discover_mode < constraint->discover_mode) {
2215  if (constraint->discover_mode == pe_discover_exclusive) {
2216  rsc->exclusive_discover = TRUE;
2217  }
2218  /* exclusive > never > always... always is default */
2219  other_node->rsc_discover_mode = constraint->discover_mode;
2220  }
2221  }
2222 
2223  g_hash_table_iter_init(&iter, rsc->allowed_nodes);
2224  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
2225  pe_rsc_trace(rsc, "%s + %s : %d", rsc->id, node->details->uname, node->weight);
2226  }
2227 }
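
A worked example of the weight merging above (constraint and scores illustrative):

/*
 * location constraint: prefer node1 with score 200 for rscA
 * allowed_nodes before: node1=50, node2=0
 * allowed_nodes after:  node1=merge_weights(50, 200)=250, node2=0
 *
 * A node named only by the constraint is copied in via node_copy(), carrying
 * its weight and discover mode; pe_discover_exclusive additionally sets
 * rsc->exclusive_discover, restricting probes to exclusively marked nodes.
 */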
2228 
2229 void
2230 native_expand(resource_t * rsc, pe_working_set_t * data_set)
2231 {
2232  GListPtr gIter = NULL;
2233 
2234  CRM_ASSERT(rsc);
2235  pe_rsc_trace(rsc, "Processing actions from %s", rsc->id);
2236 
2237  for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
2238  action_t *action = (action_t *) gIter->data;
2239 
2240  crm_trace("processing action %d for rsc=%s", action->id, rsc->id);
2241  graph_element_from_action(action, data_set);
2242  }
2243 
2244  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
2245  resource_t *child_rsc = (resource_t *) gIter->data;
2246 
2247  child_rsc->cmds->expand(child_rsc, data_set);
2248  }
2249 }
2250 
2251 #define log_change(a, fmt, args...) do { \
2252  if(a && a->reason && terminal) { \
2253  printf(" * "fmt" \tdue to %s\n", ##args, a->reason); \
2254  } else if(a && a->reason) { \
2255  crm_notice(fmt" \tdue to %s", ##args, a->reason); \
2256  } else if(terminal) { \
2257  printf(" * "fmt"\n", ##args); \
2258  } else { \
2259  crm_notice(fmt, ##args); \
2260  } \
2261  } while(0)
2262 
2263 #define STOP_SANITY_ASSERT(lineno) do { \
2264  if(current && current->details->unclean) { \
2265  /* It will be a pseudo op */ \
2266  } else if(stop == NULL) { \
2267  crm_err("%s:%d: No stop action exists for %s", __FUNCTION__, lineno, rsc->id); \
2268  CRM_ASSERT(stop != NULL); \
2269  } else if(is_set(stop->flags, pe_action_optional)) { \
2270  crm_err("%s:%d: Action %s is still optional", __FUNCTION__, lineno, stop->uuid); \
2271  CRM_ASSERT(is_not_set(stop->flags, pe_action_optional)); \
2272  } \
2273  } while(0)
2274 
2275 static int rsc_width = 5;
2276 static int detail_width = 5;
2277 static void
2278 LogAction(const char *change, resource_t *rsc, pe_node_t *origin, pe_node_t *destination, pe_action_t *action, pe_action_t *source, gboolean terminal)
2279 {
2280  int len = 0;
2281  char *reason = NULL;
2282  char *details = NULL;
2283  bool same_host = FALSE;
2284  bool same_role = FALSE;
2285  bool need_role = FALSE;
2286 
2287  CRM_ASSERT(action);
2288  CRM_ASSERT(destination != NULL || origin != NULL);
2289 
2290  if(source == NULL) {
2291  source = action;
2292  }
2293 
2294  len = strlen(rsc->id);
2295  if(len > rsc_width) {
2296  rsc_width = len + 2;
2297  }
2298 
2299  if(rsc->role > RSC_ROLE_STARTED || rsc->next_role > RSC_ROLE_SLAVE) {
2300  need_role = TRUE;
2301  }
2302 
2303  if(origin != NULL && destination != NULL && origin->details == destination->details) {
2304  same_host = TRUE;
2305  }
2306 
2307  if(rsc->role == rsc->next_role) {
2308  same_role = TRUE;
2309  }
2310 
2311  if(need_role && origin == NULL) {
2312  /* Promoting from Stopped */
2313  details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), destination->details->uname);
2314 
2315  } else if(need_role && destination == NULL) {
2316  /* Demoting a Master or Stopping a Slave */
2317  details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname);
2318 
2319  } else if(origin == NULL || destination == NULL) {
2320  /* Starting or stopping a resource */
2321  details = crm_strdup_printf("%s", origin?origin->details->uname:destination->details->uname);
2322 
2323  } else if(need_role && same_role && same_host) {
2324  /* Recovering or restarting a promotable clone resource */
2325  details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname);
2326 
2327  } else if(same_role && same_host) {
2328  /* Recovering or Restarting a normal resource */
2329  details = crm_strdup_printf("%s", origin->details->uname);
2330 
2331  } else if(same_role && need_role) {
2332  /* Moving a promotable clone resource */
2333  details = crm_strdup_printf("%s -> %s %s", origin->details->uname, destination->details->uname, role2text(rsc->role));
2334 
2335  } else if(same_role) {
2336  /* Moving a normal resource */
2337  details = crm_strdup_printf("%s -> %s", origin->details->uname, destination->details->uname);
2338 
2339  } else if(same_host) {
2340  /* Promoting or demoting a promotable clone resource */
2341  details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), origin->details->uname);
2342 
2343  } else {
2344  /* Moving and promoting/demoting */
2345  details = crm_strdup_printf("%s %s -> %s %s", role2text(rsc->role), origin->details->uname, role2text(rsc->next_role), destination->details->uname);
2346  }
2347 
2348  len = strlen(details);
2349  if(len > detail_width) {
2350  detail_width = len;
2351  }
2352 
2353  if(source->reason && is_not_set(action->flags, pe_action_runnable)) {
2354  reason = crm_strdup_printf(" due to %s (blocked)", source->reason);
2355 
2356  } else if(source->reason) {
2357  reason = crm_strdup_printf(" due to %s", source->reason);
2358 
2359  } else if(is_not_set(action->flags, pe_action_runnable)) {
2360  reason = strdup(" blocked");
2361 
2362  } else {
2363  reason = strdup("");
2364  }
2365 
2366  if(terminal) {
2367  printf(" * %-8s %-*s ( %*s ) %s\n", change, rsc_width, rsc->id, detail_width, details, reason);
2368  } else {
2369  crm_notice(" * %-8s %-*s ( %*s ) %s", change, rsc_width, rsc->id, detail_width, details, reason);
2370  }
2371 
2372  free(details);
2373  free(reason);
2374 }
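
The printf format above produces the column-aligned transition summary familiar from crm_simulate output. A sample of what it can look like (resources, nodes, and reasons illustrative):

/*
 *  * Move       rscA      (          node1 -> node2 )
 *  * Recover    rscB      (                   node1 )  due to monitor failure (blocked)
 *  * Promote    rscC      (  Slave -> Master node2 )
 *
 * rsc_width and detail_width are static and only ever grow, so the columns
 * stay aligned across all actions logged in a single scheduler run.
 */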
2375 
2376 
2377 void
2378 LogActions(resource_t * rsc, pe_working_set_t * data_set, gboolean terminal)
2379 {
2380  node_t *next = NULL;
2381  node_t *current = NULL;
2382  pe_node_t *start_node = NULL;
2383 
2384  action_t *stop = NULL;
2385  action_t *start = NULL;
2386  action_t *demote = NULL;
2387  action_t *promote = NULL;
2388 
2389  char *key = NULL;
2390  gboolean moving = FALSE;
2391  GListPtr possible_matches = NULL;
2392 
2393  if(rsc->variant == pe_container) {
2394  pcmk__bundle_log_actions(rsc, data_set, terminal);
2395  return;
2396  }
2397 
2398  if (rsc->children) {
2399  GListPtr gIter = NULL;
2400 
2401  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
2402  resource_t *child_rsc = (resource_t *) gIter->data;
2403 
2404  LogActions(child_rsc, data_set, terminal);
2405  }
2406  return;
2407  }
2408 
2409  next = rsc->allocated_to;
2410  if (rsc->running_on) {
2411  current = pe__current_node(rsc);
2412  if (rsc->role == RSC_ROLE_STOPPED) {
2413  /*
2414  * This can occur when resources are being recovered
2415  * We fiddle with the current role in native_create_actions()
2416  */
2417  rsc->role = RSC_ROLE_STARTED;
2418  }
2419  }
2420 
2421  if (current == NULL && is_set(rsc->flags, pe_rsc_orphan)) {
2422  /* Don't log stopped orphans */
2423  return;
2424  }
2425 
2426  if (is_not_set(rsc->flags, pe_rsc_managed)
2427  || (current == NULL && next == NULL)) {
2428  pe_rsc_info(rsc, "Leave %s\t(%s%s)",
2429  rsc->id, role2text(rsc->role), is_not_set(rsc->flags,
2430  pe_rsc_managed) ? " unmanaged" : "");
2431  return;
2432  }
2433 
2434  if (current != NULL && next != NULL && safe_str_neq(current->details->id, next->details->id)) {
2435  moving = TRUE;
2436  }
2437 
2438  possible_matches = pe__resource_actions(rsc, next, RSC_START, FALSE);
2439  if (possible_matches) {
2440  start = possible_matches->data;
2441  g_list_free(possible_matches);
2442  }
2443 
2444  if ((start == NULL) || is_not_set(start->flags, pe_action_runnable)) {
2445  start_node = NULL;
2446  } else {
2447  start_node = current;
2448  }
2449  possible_matches = pe__resource_actions(rsc, start_node, RSC_STOP, FALSE);
2450  if (possible_matches) {
2451  stop = possible_matches->data;
2452  g_list_free(possible_matches);
2453  }
2454 
2455  possible_matches = pe__resource_actions(rsc, next, RSC_PROMOTE, FALSE);
2456  if (possible_matches) {
2457  promote = possible_matches->data;
2458  g_list_free(possible_matches);
2459  }
2460 
2461  possible_matches = pe__resource_actions(rsc, next, RSC_DEMOTE, FALSE);
2462  if (possible_matches) {
2463  demote = possible_matches->data;
2464  g_list_free(possible_matches);
2465  }
2466 
2467  if (rsc->role == rsc->next_role) {
2468  action_t *migrate_op = NULL;
2469 
2470  possible_matches = pe__resource_actions(rsc, next, RSC_MIGRATED, FALSE);
2471  if (possible_matches) {
2472  migrate_op = possible_matches->data;
2473  }
2474 
2475  CRM_CHECK(next != NULL,);
2476  if (next == NULL) {
2477  } else if (migrate_op && is_set(migrate_op->flags, pe_action_runnable) && current) {
2478  LogAction("Migrate", rsc, current, next, start, NULL, terminal);
2479 
2480  } else if (is_set(rsc->flags, pe_rsc_reload)) {
2481  LogAction("Reload", rsc, current, next, start, NULL, terminal);
2482 
2483 
2484  } else if (start == NULL || is_set(start->flags, pe_action_optional)) {
2485  if ((demote != NULL) && (promote != NULL)
2486  && is_not_set(demote->flags, pe_action_optional)
2487  && is_not_set(promote->flags, pe_action_optional)) {
2488  LogAction("Re-promote", rsc, current, next, promote, demote,
2489  terminal);
2490  } else {
2491  pe_rsc_info(rsc, "Leave %s\t(%s %s)", rsc->id,
2492  role2text(rsc->role), next->details->uname);
2493  }
2494 
2495  } else if (start && is_set(start->flags, pe_action_runnable) == FALSE) {
2496  LogAction("Stop", rsc, current, NULL, stop,
2497  (stop && stop->reason)? stop : start, terminal);
2498  STOP_SANITY_ASSERT(__LINE__);
2499 
2500  } else if (moving && current) {
2501  LogAction(is_set(rsc->flags, pe_rsc_failed) ? "Recover" : "Move",
2502  rsc, current, next, stop, NULL, terminal);
2503 
2504  } else if (is_set(rsc->flags, pe_rsc_failed)) {
2505  LogAction("Recover", rsc, current, NULL, stop, NULL, terminal);
2506  STOP_SANITY_ASSERT(__LINE__);
2507 
2508  } else {
2509  LogAction("Restart", rsc, current, next, start, NULL, terminal);
2510  /* STOP_SANITY_ASSERT(__LINE__); False positive for migrate-fail-7 */
2511  }
2512 
2513  g_list_free(possible_matches);
2514  return;
2515  }
2516 
2517  if(stop
2518  && (rsc->next_role == RSC_ROLE_STOPPED
2519  || (start && is_not_set(start->flags, pe_action_runnable)))) {
2520 
2521  GListPtr gIter = NULL;
2522 
2523  key = stop_key(rsc);
2524  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
2525  node_t *node = (node_t *) gIter->data;
2526  action_t *stop_op = NULL;
2527 
2528  possible_matches = find_actions(rsc->actions, key, node);
2529  if (possible_matches) {
2530  stop_op = possible_matches->data;
2531  g_list_free(possible_matches);
2532  }
2533 
2534  if (stop_op && (stop_op->flags & pe_action_runnable)) {
2535  STOP_SANITY_ASSERT(__LINE__);
2536  }
2537 
2538  LogAction("Stop", rsc, node, NULL, stop_op,
2539  (stop_op && stop_op->reason)? stop_op : start, terminal);
2540  }
2541 
2542  free(key);
2543 
2544  } else if (stop && is_set(rsc->flags, pe_rsc_failed)
2545  && is_set(rsc->flags, pe_rsc_stop)) {
2546  /* 'stop' may be NULL if the failure was ignored */
2547  LogAction("Recover", rsc, current, next, stop, start, terminal);
2548  STOP_SANITY_ASSERT(__LINE__);
2549 
2550  } else if (moving) {
2551  LogAction("Move", rsc, current, next, stop, NULL, terminal);
2552  STOP_SANITY_ASSERT(__LINE__);
2553 
2554  } else if (is_set(rsc->flags, pe_rsc_reload)) {
2555  LogAction("Reload", rsc, current, next, start, NULL, terminal);
2556 
2557  } else if (stop != NULL && is_not_set(stop->flags, pe_action_optional)) {
2558  LogAction("Restart", rsc, current, next, start, NULL, terminal);
2559  STOP_SANITY_ASSERT(__LINE__);
2560 
2561  } else if (rsc->role == RSC_ROLE_MASTER) {
2562  CRM_LOG_ASSERT(current != NULL);
2563  LogAction("Demote", rsc, current, next, demote, NULL, terminal);
2564 
2565  } else if(rsc->next_role == RSC_ROLE_MASTER) {
2566  CRM_LOG_ASSERT(next);
2567  LogAction("Promote", rsc, current, next, promote, NULL, terminal);
2568 
2569  } else if (rsc->role == RSC_ROLE_STOPPED && rsc->next_role > RSC_ROLE_STOPPED) {
2570  LogAction("Start", rsc, current, next, start, NULL, terminal);
2571  }
2572 }
2573 
2574 gboolean
2575 StopRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
2576 {
2577  GListPtr gIter = NULL;
2578 
2579  CRM_ASSERT(rsc);
2580  pe_rsc_trace(rsc, "%s", rsc->id);
2581 
2582  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
2583  node_t *current = (node_t *) gIter->data;
2584  action_t *stop;
2585 
2586  if (rsc->partial_migration_target) {
2587  if (rsc->partial_migration_target->details == current->details) {
2588  pe_rsc_trace(rsc, "Filtered %s -> %s %s", current->details->uname,
2589  next->details->uname, rsc->id);
2590  continue;
2591  } else {
2592  pe_rsc_trace(rsc, "Forced on %s %s", current->details->uname, rsc->id);
2593  optional = FALSE;
2594  }
2595  }
2596 
2597  pe_rsc_trace(rsc, "%s on %s", rsc->id, current->details->uname);
2598  stop = stop_action(rsc, current, optional);
2599 
2600  if(rsc->allocated_to == NULL) {
2601  pe_action_set_reason(stop, "node availability", TRUE);
2602  }
2603 
2604  if (is_not_set(rsc->flags, pe_rsc_managed)) {
2605  update_action_flags(stop, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
2606  }
2607 
2608  if (is_set(data_set->flags, pe_flag_remove_after_stop)) {
2609  DeleteRsc(rsc, current, optional, data_set);
2610  }
2611 
2612  if(is_set(rsc->flags, pe_rsc_needs_unfencing)) {
2613  action_t *unfence = pe_fence_op(current, "on", TRUE, NULL, data_set);
2614 
2615  order_actions(stop, unfence, pe_order_implies_first);
2616  if (!node_has_been_unfenced(current)) {
2617  pe_proc_err("Stopping %s until %s can be unfenced", rsc->id, current->details->uname);
2618  }
2619  }
2620  }
2621 
2622  return TRUE;
2623 }
2624 
2625 static void
2626 order_after_unfencing(resource_t *rsc, pe_node_t *node, action_t *action,
2627  enum pe_ordering order, pe_working_set_t *data_set)
2628 {
2629  /* When unfencing is in use, we order unfence actions before any probe or
2630  * start of resources that require unfencing, and also of fence devices.
2631  *
2632  * This might seem to violate the principle that fence devices require
2633  * only quorum. However, fence agents that unfence often don't have enough
2634  * information to even probe or start unless the node is first unfenced.
2635  */
2636  if (is_unfence_device(rsc, data_set)
2637  || is_set(rsc->flags, pe_rsc_needs_unfencing)) {
2638 
2639  /* Start with an optional ordering. Requiring unfencing would result in
2640  * the node being unfenced, and all its resources being stopped,
2641  * whenever a new resource is added -- which would be highly suboptimal.
2642  */
2643  action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, data_set);
2644 
2645  order_actions(unfence, action, order);
2646 
2647  if (!node_has_been_unfenced(node)) {
2648  // But unfencing is required if it has never been done
2649  char *reason = crm_strdup_printf("required by %s %s",
2650  rsc->id, action->task);
2651 
2652  trigger_unfencing(NULL, node, reason, NULL, data_set);
2653  free(reason);
2654  }
2655  }
2656 }
2657 
2658 gboolean
2659 StartRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
2660 {
2661  action_t *start = NULL;
2662 
2663  CRM_ASSERT(rsc);
2664  pe_rsc_trace(rsc, "%s on %s %d %d", rsc->id, next ? next->details->uname : "N/A", optional, next ? next->weight : 0);
2665  start = start_action(rsc, next, TRUE);
2666 
2667  order_after_unfencing(rsc, next, start, pe_order_implies_then, data_set);
2668 
2669  if (is_set(start->flags, pe_action_runnable) && optional == FALSE) {
2670  update_action_flags(start, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
2671  }
2672 
2673 
2674  return TRUE;
2675 }
2676 
2677 gboolean
2678 PromoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
2679 {
2680  GListPtr gIter = NULL;
2681  gboolean runnable = TRUE;
2682  GListPtr action_list = NULL;
2683 
2684  CRM_ASSERT(rsc);
2685  CRM_CHECK(next != NULL, return FALSE);
2686  pe_rsc_trace(rsc, "%s on %s", rsc->id, next->details->uname);
2687 
2688  action_list = pe__resource_actions(rsc, next, RSC_START, TRUE);
2689 
2690  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
2691  action_t *start = (action_t *) gIter->data;
2692 
2693  if (is_set(start->flags, pe_action_runnable) == FALSE) {
2694  runnable = FALSE;
2695  }
2696  }
2697  g_list_free(action_list);
2698 
2699  if (runnable) {
2700  promote_action(rsc, next, optional);
2701  return TRUE;
2702  }
2703 
2704  pe_rsc_debug(rsc, "%s\tPromote %s (canceled)", next->details->uname, rsc->id);
2705 
2706  action_list = pe__resource_actions(rsc, next, RSC_PROMOTE, TRUE);
2707 
2708  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
2709  action_t *promote = (action_t *) gIter->data;
2710 
2711  update_action_flags(promote, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
2712  }
2713 
2714  g_list_free(action_list);
2715  return TRUE;
2716 }
2717 
2718 gboolean
2719 DemoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
2720 {
2721  GListPtr gIter = NULL;
2722 
2723  CRM_ASSERT(rsc);
2724  pe_rsc_trace(rsc, "%s", rsc->id);
2725 
2726 /* CRM_CHECK(rsc->next_role == RSC_ROLE_SLAVE, return FALSE); */
2727  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
2728  node_t *current = (node_t *) gIter->data;
2729 
2730  pe_rsc_trace(rsc, "%s on %s", rsc->id, next ? next->details->uname : "N/A");
2731  demote_action(rsc, current, optional);
2732  }
2733  return TRUE;
2734 }
2735 
2736 gboolean
2737 RoleError(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
2738 {
2739  CRM_ASSERT(rsc);
2740  crm_err("%s on %s", rsc->id, next ? next->details->uname : "N/A");
2741  CRM_CHECK(FALSE, return FALSE);
2742  return FALSE;
2743 }
2744 
2745 gboolean
2746 NullOp(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
2747 {
2748  CRM_ASSERT(rsc);
2749  pe_rsc_trace(rsc, "%s", rsc->id);
2750  return FALSE;
2751 }
2752 
2753 gboolean
2754 DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set)
2755 {
2756  if (is_set(rsc->flags, pe_rsc_failed)) {
2757  pe_rsc_trace(rsc, "Resource %s not deleted from %s: failed", rsc->id, node->details->uname);
2758  return FALSE;
2759 
2760  } else if (node == NULL) {
2761  pe_rsc_trace(rsc, "Resource %s not deleted: NULL node", rsc->id);
2762  return FALSE;
2763 
2764  } else if (node->details->unclean || node->details->online == FALSE) {
2765  pe_rsc_trace(rsc, "Resource %s not deleted from %s: unrunnable", rsc->id,
2766  node->details->uname);
2767  return FALSE;
2768  }
2769 
2770  crm_notice("Removing %s from %s", rsc->id, node->details->uname);
2771 
2772  delete_action(rsc, node, optional);
2773 
2774  new_rsc_order(rsc, RSC_STOP, rsc, RSC_DELETE,
2775  optional ? pe_order_implies_then : pe_order_optional, data_set);
2776 
2777  new_rsc_order(rsc, RSC_DELETE, rsc, RSC_START,
2778  optional ? pe_order_implies_then : pe_order_optional, data_set);
2779 
2780  return TRUE;
2781 }
2782 
2783 gboolean
2784 native_create_probe(resource_t * rsc, node_t * node, action_t * complete,
2785  gboolean force, pe_working_set_t * data_set)
2786 {
2787  enum pe_ordering flags = pe_order_optional;
2788  char *key = NULL;
2789  action_t *probe = NULL;
2790  node_t *running = NULL;
2791  node_t *allowed = NULL;
2792  resource_t *top = uber_parent(rsc);
2793 
2794  static const char *rc_master = NULL;
2795  static const char *rc_inactive = NULL;
2796 
2797  if (rc_inactive == NULL) {
2798  rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING);
2799  rc_master = crm_itoa(PCMK_OCF_RUNNING_MASTER);
2800  }
2801 
2802  CRM_CHECK(node != NULL, return FALSE);
2803  if (force == FALSE && is_not_set(data_set->flags, pe_flag_startup_probes)) {
2804  pe_rsc_trace(rsc, "Skipping active resource detection for %s", rsc->id);
2805  return FALSE;
2806  }
2807 
2808  if (pe__is_guest_or_remote_node(node)) {
2809  const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
2810 
2811  if (safe_str_eq(class, PCMK_RESOURCE_CLASS_STONITH)) {
2812  pe_rsc_trace(rsc,
2813  "Skipping probe for %s on %s because Pacemaker Remote nodes cannot run stonith agents",
2814  rsc->id, node->details->id);
2815  return FALSE;
2816  } else if (pe__is_guest_node(node)
2817  && pe__resource_contains_guest_node(data_set, rsc)) {
2818  pe_rsc_trace(rsc,
2819  "Skipping probe for %s on %s because guest nodes cannot run resources containing guest nodes",
2820  rsc->id, node->details->id);
2821  return FALSE;
2822  } else if (rsc->is_remote_node) {
2823  pe_rsc_trace(rsc,
2824  "Skipping probe for %s on %s because Pacemaker Remote nodes cannot host remote connections",
2825  rsc->id, node->details->id);
2826  return FALSE;
2827  }
2828  }
2829 
2830  if (rsc->children) {
2831  GListPtr gIter = NULL;
2832  gboolean any_created = FALSE;
2833 
2834  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
2835  resource_t *child_rsc = (resource_t *) gIter->data;
2836 
2837  any_created = child_rsc->cmds->create_probe(child_rsc, node, complete, force, data_set)
2838  || any_created;
2839  }
2840 
2841  return any_created;
2842 
2843  } else if ((rsc->container) && (!rsc->is_remote_node)) {
2844  pe_rsc_trace(rsc, "Skipping %s: it is within container %s", rsc->id, rsc->container->id);
2845  return FALSE;
2846  }
2847 
2848  if (is_set(rsc->flags, pe_rsc_orphan)) {
2849  pe_rsc_trace(rsc, "Skipping orphan: %s", rsc->id);
2850  return FALSE;
2851  }
2852 
2853  // Check whether resource is already known on node
2854  if (!force && g_hash_table_lookup(rsc->known_on, node->details->id)) {
2855  pe_rsc_trace(rsc, "Skipping known: %s on %s", rsc->id, node->details->uname);
2856  return FALSE;
2857  }
2858 
2859  allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
2860 
2861  if (rsc->exclusive_discover || top->exclusive_discover) {
2862  if (allowed == NULL) {
2863  /* exclusive discover is enabled and this node is not in the allowed list. */
2864  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, A", rsc->id, node->details->id);
2865  return FALSE;
2866  } else if (allowed->rsc_discover_mode != pe_discover_exclusive) {
2867  /* exclusive discover is enabled and this node is not marked
2868  * as a node this resource should be discovered on */
2869  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, B", rsc->id, node->details->id);
2870  return FALSE;
2871  }
2872  }
2873 
2874  if(allowed == NULL && node->rsc_discover_mode == pe_discover_never) {
2875  /* If this node was allowed to host this resource it would
2876  * have been explicitly added to the 'allowed_nodes' list.
2877  * However it wasn't and the node has discovery disabled, so
2878  * no need to probe for this resource.
2879  */
2880  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, C", rsc->id, node->details->id);
2881  return FALSE;
2882  }
2883 
2884  if (allowed && allowed->rsc_discover_mode == pe_discover_never) {
2885  /* this resource is marked as not needing to be discovered on this node */
2886  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, discovery mode", rsc->id, node->details->id);
2887  return FALSE;
2888  }
2889 
2890  if (pe__is_guest_node(node)) {
2891  resource_t *remote = node->details->remote_rsc->container;
2892 
2893  if(remote->role == RSC_ROLE_STOPPED) {
2894  /* If the container is stopped, then we know anything that
2895  * might have been inside it is also stopped and there is
2896  * no need to probe.
2897  *
2898  * If we don't know the container's state on the target
2899  * either:
2900  *
2901  * - the container is running, the transition will abort
2902  * and we'll end up in a different case next time, or
2903  *
2904  * - the container is stopped
2905  *
2906  * Either way there is no need to probe.
2907  *
2908  */
2909  if(remote->allocated_to
2910  && g_hash_table_lookup(remote->known_on, remote->allocated_to->details->id) == NULL) {
2911  /* For safety, we order the 'rsc' start after 'remote'
2912  * has been probed.
2913  *
2914  * Using 'top' helps for groups, but we may need to
2915  * follow the start's ordering chain backwards.
2916  */
2917  custom_action_order(remote, generate_op_key(remote->id, RSC_STATUS, 0), NULL,
2918  top, generate_op_key(top->id, RSC_START, 0), NULL,
2919  pe_order_optional, data_set);
2920  }
2921  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopped",
2922  rsc->id, node->details->id, remote->id);
2923  return FALSE;
2924 
2925  /* Here we really we want to check if remote->stop is required,
2926  * but that information doesn't exist yet
2927  */
2928  } else if(node->details->remote_requires_reset
2929  || node->details->unclean
2930  || is_set(remote->flags, pe_rsc_failed)
2931  || remote->next_role == RSC_ROLE_STOPPED
2932  || (remote->allocated_to
2933  && pe_find_node(remote->running_on, remote->allocated_to->details->uname) == NULL)
2934  ) {
2935  /* The container is stopping or restarting, don't start
2936  * 'rsc' until 'remote' stops as this also implies that
2937  * 'rsc' is stopped - avoiding the need to probe
2938  */
2939  custom_action_order(remote, generate_op_key(remote->id, RSC_STOP, 0), NULL,
2940  top, generate_op_key(top->id, RSC_START, 0), NULL,
2941  pe_order_optional, data_set);
2942  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopping, restarting or moving",
2943  rsc->id, node->details->id, remote->id);
2944  return FALSE;
2945 /* } else {
2946  * The container is running so there is no problem probing it
2947  */
2948  }
2949  }
2950 
2951  key = generate_op_key(rsc->id, RSC_STATUS, 0);
2952  probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set);
2953  update_action_flags(probe, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
2954 
2955  order_after_unfencing(rsc, node, probe, pe_order_optional, data_set);
2956 
2957  /*
2958  * We need to know if it's running_on (not just known_on) this node
2959  * to correctly determine the target rc.
2960  */
2961  running = pe_find_node_id(rsc->running_on, node->details->id);
2962  if (running == NULL) {
2963  add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
2964 
2965  } else if (rsc->role == RSC_ROLE_MASTER) {
2966  add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_master);
2967  }
2968 
2969  crm_debug("Probing %s on %s (%s) %d %p", rsc->id, node->details->uname, role2text(rsc->role),
2970  is_set(probe->flags, pe_action_runnable), rsc->running_on);
2971 
2972  if (is_unfence_device(rsc, data_set) || !pe_rsc_is_clone(top)) {
2973  top = rsc;
2974  } else {
2975  crm_trace("Probing %s on %s (%s) as %s", rsc->id, node->details->uname, role2text(rsc->role), top->id);
2976  }
2977 
2978  if(is_not_set(probe->flags, pe_action_runnable) && rsc->running_on == NULL) {
2979  /* Prevent the start from occurring if rsc isn't active, but
2980  * don't cause it to stop if it was active already
2981  */
2982  flags |= pe_order_runnable_left;
2983  }
2984 
2985  custom_action_order(rsc, NULL, probe,
2986  top, generate_op_key(top->id, RSC_START, 0), NULL,
2987  flags, data_set);
2988 
2989  /* Before any reloads, if they exist */
2990  custom_action_order(rsc, NULL, probe,
2991  top, reload_key(rsc), NULL,
2992  pe_order_optional, data_set);
2993 
2994 #if 0
2995  // complete is always null currently
2996  if (!is_unfence_device(rsc, data_set)) {
2997  /* Normally rsc.start depends on probe complete which depends
2998  * on rsc.probe. But this can't be the case for fence devices
2999  * with unfencing, as it would create graph loops.
3000  *
3001  * So instead we explicitly order 'rsc.probe then rsc.start'
3002  */
3003  order_actions(probe, complete, pe_order_implies_then);
3004  }
3005 #endif
3006  return TRUE;
3007 }
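
The expected probe result chosen above is what lets the controller detect surprises. In summary (rc values from the OCF standard):

/*
 * rsc not running on the node  -> XML_ATTR_TE_TARGET_RC = 7 (not running):
 *                                 the probe "succeeds" by finding nothing
 * rsc is master on the node    -> target rc 8 (running master)
 * rsc started on the node      -> no target set; 0 (success) is expected
 *
 * If the actual monitor result differs from the target, the controller
 * aborts the transition and the scheduler recomputes with the new state.
 */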
3008 
3018 static bool
3019 rsc_is_known_on(pe_resource_t *rsc, const pe_node_t *node)
3020 {
3021  if (pe_hash_table_lookup(rsc->known_on, node->details->id)) {
3022  return TRUE;
3023 
3024  } else if ((rsc->variant == pe_native)
3025  && pe_rsc_is_anon_clone(rsc->parent)
3026  && pe_hash_table_lookup(rsc->parent->known_on, node->details->id)) {
3027  /* We check only the parent, not the uber-parent, because we cannot
3028  * assume that the resource is known if it is in an anonymously cloned
3029  * group (which may be only partially known).
3030  */
3031  return TRUE;
3032  }
3033  return FALSE;
3034 }
3035 
3044 static void
3045 native_start_constraints(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set)
3046 {
3047  node_t *target;
3048  GListPtr gIter = NULL;
3049 
3050  CRM_CHECK(stonith_op && stonith_op->node, return);
3051  target = stonith_op->node;
3052 
3053  for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
3054  action_t *action = (action_t *) gIter->data;
3055 
3056  switch (action->needs) {
3057  case rsc_req_nothing:
3058  // Anything other than start or promote requires nothing
3059  break;
3060 
3061  case rsc_req_stonith:
3062  order_actions(stonith_op, action, pe_order_optional);
3063  break;
3064 
3065  case rsc_req_quorum:
3066  if (safe_str_eq(action->task, RSC_START)
3067  && pe_hash_table_lookup(rsc->allowed_nodes, target->details->id)
3068  && !rsc_is_known_on(rsc, target)) {
3069 
3070  /* If we don't know the status of the resource on the node
3071  * we're about to shoot, we have to assume it may be active
3072  * there. Order the resource start after the fencing. This
3073  * is analogous to waiting for all the probes for a resource
3074  * to complete before starting it.
3075  *
3076  * The most likely explanation is that the DC died and took
3077  * its status with it.
3078  */
3079  pe_rsc_debug(rsc, "Ordering %s after %s recovery", action->uuid,
3080  target->details->uname);
3081  order_actions(stonith_op, action,
3082  pe_order_optional | pe_order_runnable_left);
3083  }
3084  break;
3085  }
3086  }
3087 }
3088 
3089 static void
3090 native_stop_constraints(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set)
3091 {
3092  GListPtr gIter = NULL;
3093  GListPtr action_list = NULL;
3094  bool order_implicit = false;
3095 
3096  resource_t *top = uber_parent(rsc);
3097  pe_action_t *parent_stop = NULL;
3098  node_t *target;
3099 
3100  CRM_CHECK(stonith_op && stonith_op->node, return);
3101  target = stonith_op->node;
3102 
3103  /* Get a list of stop actions potentially implied by the fencing */
3104  action_list = pe__resource_actions(rsc, target, RSC_STOP, FALSE);
3105 
3106  /* If resource requires fencing, implicit actions must occur after fencing.
3107  *
3108  * Implied stops and demotes of resources running on guest nodes are always
3109  * ordered after fencing, even if the resource does not require fencing,
3110  * because guest node "fencing" is actually just a resource stop.
3111  */
3112  if (is_set(rsc->flags, pe_rsc_needs_fencing) || pe__is_guest_node(target)) {
3113  order_implicit = true;
3114  }
3115 
3116  if (action_list && order_implicit) {
3117  parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL);
3118  }
3119 
3120  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
3121  action_t *action = (action_t *) gIter->data;
3122 
3123  // The stop would never complete, so convert it into a pseudo-action.
3124  update_action_flags(action, pe_action_pseudo | pe_action_runnable,
3125  __FUNCTION__, __LINE__);
3126 
3127  if (order_implicit) {
3128  update_action_flags(action, pe_action_implied_by_stonith,
3129  __FUNCTION__, __LINE__);
3130 
3131  /* Order the stonith before the parent stop (if any).
3132  *
3133  * Also order the stonith before the resource stop, unless the
3134  * resource is inside a bundle -- that would cause a graph loop.
3135  * We can rely on the parent stop's ordering instead.
3136  *
3137  * User constraints must not order a resource in a guest node
3138  * relative to the guest node container resource. The
3139  * pe_order_preserve flag marks constraints as generated by the
3140  * cluster and thus immune to that check (and is irrelevant if
3141  * target is not a guest).
3142  */
3143  if (!pe_rsc_is_bundled(rsc)) {
3144  order_actions(stonith_op, action, pe_order_preserve);
3145  }
3146  order_actions(stonith_op, parent_stop, pe_order_preserve);
3147  }
3148 
3149  if (is_set(rsc->flags, pe_rsc_failed)) {
3150  crm_notice("Stop of failed resource %s is implicit %s %s is fenced",
3151  rsc->id, (order_implicit? "after" : "because"),
3152  target->details->uname);
3153  } else {
3154  crm_info("%s is implicit %s %s is fenced",
3155  action->uuid, (order_implicit? "after" : "because"),
3156  target->details->uname);
3157  }
3158 
3159  if (is_set(rsc->flags, pe_rsc_notify)) {
3160  /* Create a second notification that will be delivered
3161  * immediately after the node is fenced
3162  *
3163  * Basic problem:
3164  * - C is a clone active on the node to be shot and stopping on another
3165  * - R is a resource that depends on C
3166  *
3167  * + C.stop depends on R.stop
3168  * + C.stopped depends on STONITH
3169  * + C.notify depends on C.stopped
3170  * + C.healthy depends on C.notify
3171  * + R.stop depends on C.healthy
3172  *
3173  * The extra notification here changes
3174  * + C.healthy depends on C.notify
3175  * into:
3176  * + C.healthy depends on C.notify'
3177  * + C.notify' depends on STONITH'
3178  * thus breaking the loop
3179  */
3180  create_secondary_notification(action, rsc, stonith_op, data_set);
3181  }
3182 
3183 /* From Bug #1601, successful fencing must be an input to a failed resources stop action.
3184 
3185  However given group(rA, rB) running on nodeX and B.stop has failed,
3186  A := stop healthy resource (rA.stop)
3187  B := stop failed resource (pseudo operation B.stop)
3188  C := stonith nodeX
3189  A requires B, B requires C, C requires A
3190  This loop would prevent the cluster from making progress.
3191 
3192  This block creates the "C requires A" dependency and therefore must (at least
3193  for now) be disabled.
3194 
3195  Instead, run the block above and treat all resources on nodeX as B would be
3196  (marked as a pseudo op depending on the STONITH).
3197 
3198  TODO: Break the "A requires B" dependency in update_action() and re-enable this block
3199 
3200  } else if(is_stonith == FALSE) {
3201  crm_info("Moving healthy resource %s"
3202  " off %s before fencing",
3203  rsc->id, node->details->uname);
3204 
3205  * stop healthy resources before the
3206  * stonith op
3207  *
3208  custom_action_order(
3209  rsc, stop_key(rsc), NULL,
3210  NULL,strdup(CRM_OP_FENCE),stonith_op,
3211  pe_order_optional, data_set);
3212 */
3213  }
3214 
3215  g_list_free(action_list);
3216 
3217  /* Get a list of demote actions potentially implied by the fencing */
3218  action_list = pe__resource_actions(rsc, target, RSC_DEMOTE, FALSE);
3219 
3220  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
3221  action_t *action = (action_t *) gIter->data;
3222 
3223  if (action->node->details->online == FALSE || action->node->details->unclean == TRUE
3224  || is_set(rsc->flags, pe_rsc_failed)) {
3225 
3226  if (is_set(rsc->flags, pe_rsc_failed)) {
3227  pe_rsc_info(rsc,
3228  "Demote of failed resource %s is implicit after %s is fenced",
3229  rsc->id, target->details->uname);
3230  } else {
3231  pe_rsc_info(rsc, "%s is implicit after %s is fenced",
3232  action->uuid, target->details->uname);
3233  }
3234 
3235  /* The demote would never complete and is now implied by the
3236  * fencing, so convert it into a pseudo-action.
3237  */
3238  update_action_flags(action, pe_action_pseudo | pe_action_runnable,
3239  __FUNCTION__, __LINE__);
3240 
3241  if (pe_rsc_is_bundled(rsc)) {
3242  /* Do nothing, let the recovery be ordered after the parent's implied stop */
3243 
3244  } else if (order_implicit) {
3245  order_actions(stonith_op, action, pe_order_preserve|pe_order_optional);
3246  }
3247  }
3248  }
3249 
3250  g_list_free(action_list);
3251 }
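
Putting the pieces above together for a failed rscA on a node nodeX that is about to be fenced (names illustrative):

/*
 * 1. rscA_stop on nodeX can never actually run, so it becomes a runnable
 *    pseudo-action: it "completes" in the graph as soon as its inputs do.
 * 2. With order_implicit, stonith(nodeX) is ordered before the parent stop
 *    (and before rscA_stop itself unless rscA is bundled), so the cluster
 *    treats rscA as stopped only once the node has been fenced.
 * 3. For clones with notifications, the extra post-fencing notification
 *    breaks the C.healthy -> R.stop -> C.stop cycle described above.
 * 4. Implied demotes on nodeX get the same pseudo-action conversion.
 */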
3252 
3253 void
3254 rsc_stonith_ordering(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set)
3255 {
3256  if (rsc->children) {
3257  GListPtr gIter = NULL;
3258 
3259  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
3260  resource_t *child_rsc = (resource_t *) gIter->data;
3261 
3262  rsc_stonith_ordering(child_rsc, stonith_op, data_set);
3263  }
3264 
3265  } else if (is_not_set(rsc->flags, pe_rsc_managed)) {
3266  pe_rsc_trace(rsc, "Skipping fencing constraints for unmanaged resource: %s", rsc->id);
3267 
3268  } else {
3269  native_start_constraints(rsc, stonith_op, data_set);
3270  native_stop_constraints(rsc, stonith_op, data_set);
3271  }
3272 }
3273 
3274 void
3275 ReloadRsc(resource_t * rsc, node_t *node, pe_working_set_t * data_set)
3276 {
3277  GListPtr gIter = NULL;
3278  action_t *reload = NULL;
3279 
3280  if (rsc->children) {
3281  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
3282  resource_t *child_rsc = (resource_t *) gIter->data;
3283 
3284  ReloadRsc(child_rsc, node, data_set);
3285  }
3286  return;
3287 
3288  } else if (rsc->variant > pe_native) {
3289  /* Complex resource with no children */
3290  return;
3291 
3292  } else if (is_not_set(rsc->flags, pe_rsc_managed)) {
3293  pe_rsc_trace(rsc, "%s: unmanaged", rsc->id);
3294  return;
3295 
3296  } else if (is_set(rsc->flags, pe_rsc_failed)) {
3297  /* We don't need to specify any particular actions here, normal failure
3298  * recovery will apply.
3299  */
3300  pe_rsc_trace(rsc, "%s: preventing reload because failed", rsc->id);
3301  return;
3302 
3303  } else if (is_set(rsc->flags, pe_rsc_start_pending)) {
3304  /* If a resource's configuration changed while a start was pending,
3305  * force a full restart.
3306  */
3307  pe_rsc_trace(rsc, "%s: preventing reload because start pending", rsc->id);
3308  stop_action(rsc, node, FALSE);
3309  return;
3310 
3311  } else if (node == NULL) {
3312  pe_rsc_trace(rsc, "%s: not active", rsc->id);
3313  return;
3314  }
3315 
3316  pe_rsc_trace(rsc, "Processing %s", rsc->id);
3317  set_bit(rsc->flags, pe_rsc_reload);
3318 
3319  reload = custom_action(
3320  rsc, reload_key(rsc), CRMD_ACTION_RELOAD, node, FALSE, TRUE, data_set);
3321  pe_action_set_reason(reload, "resource definition change", FALSE);
3322 
3323  custom_action_order(NULL, NULL, reload, rsc, stop_key(rsc), NULL,
3324  pe_order_optional | pe_order_then_cancels_first,
3325  data_set);
3326  custom_action_order(NULL, NULL, reload, rsc, demote_key(rsc), NULL,
3327  pe_order_optional | pe_order_then_cancels_first,
3328  data_set);
3329 }
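
The early returns above amount to a small decision table for whether a configuration change can be applied as a live reload (summary, not source text):

/*
 * failed resource        -> no reload; ordinary failure recovery restarts it
 * start still pending    -> forced stop: a definition that changed mid-start
 *                           cannot be reloaded safely
 * not active anywhere    -> nothing to reload
 * otherwise              -> a reload action is scheduled, and any stop or
 *                           demote ordered later cancels it
 *                           (pe_order_then_cancels_first): a resource that
 *                           is about to stop anyway is not reloaded first.
 */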
3330 
3331 void
3332 native_append_meta(resource_t * rsc, xmlNode * xml)
3333 {
3334  char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION);
3335  resource_t *parent;
3336 
3337  if (value) {
3338  char *name = NULL;
3339 
3340  name = crm_meta_name(XML_RSC_ATTR_INCARNATION);
3341  crm_xml_add(xml, name, value);
3342  free(name);
3343  }
3344 
3345  value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE);
3346  if (value) {
3347  char *name = NULL;
3348 
3349  name = crm_meta_name(XML_RSC_ATTR_REMOTE_NODE);
3350  crm_xml_add(xml, name, value);
3351  free(name);
3352  }
3353 
3354  for (parent = rsc; parent != NULL; parent = parent->parent) {
3355  if (parent->container) {
3356  crm_xml_add(xml, CRM_META "_" XML_RSC_ATTR_CONTAINER, parent->container->id);
3357  }
3358  }
3359 }
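/* Editor's note (hedged example, not part of the numbered source): since
 * crm_meta_name() prefixes a field name with "CRM_meta_", native_append_meta()
 * produces operation XML attributes along these lines for a cloned resource
 * running inside a container (the values here are made up for illustration):
 *
 *     <op ... CRM_meta_clone="0" CRM_meta_container="httpd-bundle-0" .../>
 */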