1 /*
2 * Copyright 2009-2026 the Pacemaker project contributors
3 *
4 * The version control history for this file may have further details.
5 *
6 * This source code is licensed under the GNU General Public License version 2
7 * or later (GPLv2+) WITHOUT ANY WARRANTY.
8 */
9
10 #include <crm_internal.h>
11
12 #include <stdbool.h>
13 #include <stdio.h>
14 #include <errno.h>
15 #include <glib.h>
16
17 #include <crm/pengine/status.h>
18 #include <crm/pengine/internal.h>
19
20 #include <pacemaker-internal.h>
21 #include <pacemaker-fenced.h>
22
23 // fenced_scheduler_run() assumes it's the only place scheduler->input gets set
24 static pcmk_scheduler_t *scheduler = NULL;
25
26 /*!
27 * \internal
28 * \brief Initialize scheduler data for fencer purposes
29 *
30 * \return Standard Pacemaker return code
31 */
32 int
33 fenced_scheduler_init(void)
34 {
35 pcmk__output_t *logger = NULL;
36 int rc = pcmk__log_output_new(&logger);
37
38 if (rc != pcmk_rc_ok) {
39 return rc;
40 }
41
42 scheduler = pcmk_new_scheduler();
43 if (scheduler == NULL) {
44 pcmk__output_free(logger);
45 return ENOMEM;
46 }
47
48 pe__register_messages(logger);
49 pcmk__register_lib_messages(logger);
50 pcmk__output_set_log_level(logger, LOG_TRACE);
51 scheduler->priv->out = logger;
52
53 return pcmk_rc_ok;
54 }
55
56 /*!
57 * \internal
58 * \brief Set the local node name for scheduling purposes
59 *
60 * \param[in] node_name Name to set as local node name
61 */
62 void
63 fenced_set_local_node(const char *node_name)
64 {
65 pcmk__assert(scheduler != NULL);
66
67 scheduler->priv->local_node_name = pcmk__str_copy(node_name);
68 }
69
70 /*!
71 * \internal
72 * \brief Get the local node name
73 *
74 * \return Local node name
75 */
76 const char *
77 fenced_get_local_node(void)
78 {
79 if (scheduler == NULL) {
80 return NULL;
81 }
82 return scheduler->priv->local_node_name;
83 }
84
85 /*!
86 * \internal
87 * \brief Free all scheduler-related resources
88 */
89 void
90 fenced_scheduler_cleanup(void)
91 {
|
(1) Event path: |
Condition "scheduler == NULL", taking false branch. |
92 if (scheduler == NULL) {
93 return;
94 }
95
|
(2) Event path: |
Condition "scheduler->priv->out != NULL", taking true branch. |
96 if (scheduler->priv->out != NULL) {
97 scheduler->priv->out->finish(scheduler->priv->out, CRM_EX_OK, true,
98 NULL);
|
CID (unavailable; MK=1d77adb8aa3d5986cd90accd489a53ed) (#1 of 2): Inconsistent C union access (INCONSISTENT_UNION_ACCESS): |
|
(3) Event assign_union_field: |
The union field "in" of "_pp" is written. |
|
(4) Event inconsistent_union_field_access: |
In "_pp.out", the union field used: "out" is inconsistent with the field most recently stored: "in". |
99 g_clear_pointer(&scheduler->priv->out, pcmk__output_free);
100 }
101
102 g_clear_pointer(&scheduler, pcmk_free_scheduler);
103 }
104
105 /*!
106 * \internal
107 * \brief Check whether the local node is in a resource's allowed node list
108 *
109 * \param[in] rsc Resource to check
110 *
111 * \return Pointer to node if found, otherwise NULL
112 */
113 static pcmk_node_t *
114 local_node_allowed_for(const pcmk_resource_t *rsc)
115 {
116 if ((rsc != NULL) && (scheduler->priv->local_node_name != NULL)) {
117 GHashTableIter iter;
118 pcmk_node_t *node = NULL;
119
120 g_hash_table_iter_init(&iter, rsc->priv->allowed_nodes);
121 while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
122 if (pcmk__str_eq(node->priv->name, scheduler->priv->local_node_name,
123 pcmk__str_casei)) {
124 return node;
125 }
126 }
127 }
128 return NULL;
129 }
130
131 /*!
132 * \internal
133 * \brief If a given resource or any of its children are fencing devices,
134 * register the devices
135 *
136 * \param[in,out] data Resource to check
137 * \param[in,out] user_data Ignored
138 */
static void
register_if_fencing_device(gpointer data, gpointer user_data)
{
    pcmk_resource_t *rsc = data;

    // Prefer the pre-migration history ID when one exists
    const char *rsc_id = pcmk__s(rsc->priv->history_id, rsc->id);

    xmlNode *xml = NULL;
    GHashTableIter hash_iter;
    pcmk_node_t *node = NULL;
    const char *name = NULL;
    const char *value = NULL;
    const char *agent = NULL;
    const char *rsc_provides = NULL;
    stonith_key_value_t *params = NULL;

    // If this is a collective resource, check children instead
    if (rsc->priv->children != NULL) {

        for (GList *iter = rsc->priv->children;
             iter != NULL; iter = iter->next) {

            register_if_fencing_device(iter->data, NULL);
            if (pcmk__is_clone(rsc)) {
                return; // Only one instance needs to be checked for clones
            }
        }
        return;
    }

    if (!pcmk__is_set(rsc->flags, pcmk__rsc_fence_device)) {
        return; // Not a fencing device
    }

    if (pe__resource_is_disabled(rsc)) {
        pcmk__info("Ignoring fencing device %s because it is disabled",
                   rsc->id);
        return;
    }

    // The watchdog pseudo-device is only meaningful with a positive timeout
    if ((fencing_watchdog_timeout_ms <= 0)
        && pcmk__str_eq(rsc->id, STONITH_WATCHDOG_ID, pcmk__str_none)) {

        pcmk__info("Ignoring fencing device %s because watchdog fencing is "
                   "disabled", rsc->id);
        return;
    }

    // Check whether local node is allowed to run resource
    node = local_node_allowed_for(rsc);
    if (node == NULL) {
        pcmk__info("Ignoring fencing device %s because local node is not "
                   "allowed to run it",
                   rsc->id);
        return;
    }
    // A negative score means the local node is explicitly dispreferred
    if (node->assign->score < 0) {
        pcmk__info("Ignoring fencing device %s because local node has "
                   "preference %s for it",
                   rsc->id, pcmk_readable_score(node->assign->score));
        return;
    }

    // If device is in a group, check whether local node is allowed for group
    if (pcmk__is_group(rsc->priv->parent)) {
        pcmk_node_t *group_node = local_node_allowed_for(rsc->priv->parent);

        if ((group_node != NULL) && (group_node->assign->score < 0)) {
            pcmk__info("Ignoring fencing device %s because local node has "
                       "preference %s for its group",
                       rsc->id, pcmk_readable_score(group_node->assign->score));
            return;
        }
    }

    pcmk__debug("Reloading configuration of fencing device %s", rsc->id);

    // The fence agent name comes from the resource's "type" XML attribute
    agent = pcmk__xe_get(rsc->priv->xml, PCMK_XA_TYPE);

    /* Evaluate meta-attributes (for the "provides" capability) before reading
     * from the meta table
     */
    get_meta_attributes(rsc->priv->meta, rsc, NULL, scheduler);
    rsc_provides = g_hash_table_lookup(rsc->priv->meta,
                                       PCMK_FENCING_PROVIDES);

    // Collect the resource's instance parameters as device parameters
    g_hash_table_iter_init(&hash_iter, pe_rsc_params(rsc, node, scheduler));
    while (g_hash_table_iter_next(&hash_iter, (gpointer *) &name,
                                  (gpointer *) &value)) {
        if ((name == NULL) || (value == NULL)) {
            continue;
        }
        params = stonith__key_value_add(params, name, value);
    }

    // Build the registration XML and register the device with the fencer
    xml = create_device_registration_xml(rsc_id, st_namespace_any, agent,
                                         params, rsc_provides);
    stonith__key_value_freeall(params, true, true);
    pcmk__assert(fenced_device_register(xml, true) == pcmk_rc_ok);
    pcmk__xml_free(xml);
}
236
237 /*!
238 * \internal
239 * \brief Run the scheduler for fencer purposes
240 *
241 * \param[in] cib CIB to use as scheduler input
242 *
243 * \note Scheduler object is reset before returning, but \p cib is not freed.
244 */
void
fenced_scheduler_run(xmlNode *cib)
{
    /* A non-NULL input would mean a previous run did not clean up after
     * itself (this function is the only place input is set)
     */
    CRM_CHECK((cib != NULL) && (scheduler != NULL)
              && (scheduler->input == NULL), return);

    // Start from a clean slate in case data remains from a previous run
    pcmk_reset_scheduler(scheduler);

    scheduler->input = cib;

    /* Only node placement matters here, so skip actions and status counts for
     * a cheaper scheduler run
     */
    pcmk__set_scheduler_flags(scheduler,
                              pcmk__sched_location_only|pcmk__sched_no_counts);
    pcmk__schedule_actions(scheduler);

    // Register any fencing devices the local node is eligible to run
    g_list_foreach(scheduler->priv->resources, register_if_fencing_device,
                   NULL);

    scheduler->input = NULL; // Wasn't a copy, so don't let API free it
    pcmk_reset_scheduler(scheduler);
}
263