1 /*
2 * Copyright 2012-2026 the Pacemaker project contributors
3 *
4 * The version control history for this file may have further details.
5 *
6 * This source code is licensed under the GNU General Public License version 2
7 * or later (GPLv2+) WITHOUT ANY WARRANTY.
8 */
9
10 #include <crm_internal.h>
11
12 #include <errno.h>
13 #include <stdbool.h>
14
15 #include <crm/crm.h>
16 #include <crm/common/iso8601.h>
17 #include <crm/common/xml.h>
18 #include <crm/lrmd_internal.h>
19
20 #include <pacemaker-internal.h>
21 #include <pacemaker-controld.h>
22
23 static GHashTable *lrm_state_table = NULL;
24 extern GHashTable *proxy_table;
25 int lrmd_internal_proxy_send(lrmd_t * lrmd, xmlNode *msg);
26 void lrmd_internal_set_proxy_callback(lrmd_t * lrmd, void *userdata, void (*callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg));
27
28 static void
29 free_rsc_info(gpointer value)
30 {
31 lrmd_rsc_info_t *rsc_info = value;
32
33 lrmd_free_rsc_info(rsc_info);
34 }
35
36 static void
37 free_deletion_op(gpointer value)
38 {
39 struct pending_deletion_op_s *op = value;
40
41 free(op->rsc);
42 delete_ha_msg_input(op->input);
43 free(op);
44 }
45
46 static void
47 free_recurring_op(gpointer value)
48 {
49 active_op_t *op = value;
50
51 free(op->user_data);
52 free(op->rsc_id);
53 free(op->op_type);
54 free(op->op_key);
55 g_clear_pointer(&op->params, g_hash_table_destroy);
56 free(op);
57 }
58
/*!
 * \internal
 * \brief Synthesize a failure result for an action still pending when the
 *        executor connection dropped
 *
 * GHRFunc for the active_ops table: builds a fake "execution complete" event
 * with status PCMK_EXEC_NOT_CONNECTED and feeds it through the normal LRM
 * event path, so the scheduler learns the pending action is lost.
 *
 * \param[in]     key        Operation key (used for logging only)
 * \param[in,out] value      active_op_t for the pending action
 * \param[in,out] user_data  lrm_state_t whose executor connection was dropped
 *
 * \return TRUE (so g_hash_table_foreach_remove() removes the entry)
 */
static gboolean
fail_pending_op(gpointer key, gpointer value, gpointer user_data)
{
    lrmd_event_data_t event = { 0, };
    lrm_state_t *lrm_state = user_data;
    active_op_t *op = value;

    pcmk__trace("Pre-emptively failing " PCMK__OP_FMT " on %s (call=%s, %s)",
                op->rsc_id, op->op_type, op->interval_ms,
                lrm_state->node_name, (const char *) key, op->user_data);

    // Event fields point into op and lrm_state; nothing here is copied
    event.type = lrmd_event_exec_complete;
    event.rsc_id = op->rsc_id;
    event.op_type = op->op_type;
    event.user_data = op->user_data;
    event.timeout = 0;
    event.interval_ms = op->interval_ms;
    lrmd__set_result(&event, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_NOT_CONNECTED,
                     "Action was pending when executor connection was dropped");
    event.t_run = op->start_time;
    event.t_rcchange = op->start_time;

    event.call_id = op->call_id;
    event.remote_nodename = lrm_state->node_name;
    event.params = op->params;

    process_lrm_event(lrm_state, &event, op, NULL);
    // Frees only what lrmd__set_result() allocated, not the borrowed fields
    lrmd__reset_result(&event);
    return TRUE;
}
89
90 gboolean
91 lrm_state_is_local(lrm_state_t *lrm_state)
92 {
93 return (lrm_state != NULL) && controld_is_local_node(lrm_state->node_name);
94 }
95
96 /*!
97 * \internal
98 * \brief Create executor state entry for a node and add it to the state table
99 *
100 * \param[in] node_name Node to create entry for
101 *
102 * \return Newly allocated executor state object initialized for \p node_name
103 */
104 static lrm_state_t *
105 lrm_state_create(const char *node_name)
106 {
107 lrm_state_t *state = NULL;
108
109 if (!node_name) {
110 pcmk__err("No node name given for lrm state object");
111 return NULL;
112 }
113
114 state = pcmk__assert_alloc(1, sizeof(lrm_state_t));
115
116 state->node_name = pcmk__str_copy(node_name);
117 state->rsc_info_cache = pcmk__strkey_table(NULL, free_rsc_info);
118 state->deletion_ops = pcmk__strkey_table(free, free_deletion_op);
119 state->active_ops = pcmk__strkey_table(free, free_recurring_op);
120 state->resource_history = pcmk__strkey_table(NULL, history_free);
121 state->metadata_cache = metadata_cache_new();
122
123 g_hash_table_insert(lrm_state_table, (char *)state->node_name, state);
124 return state;
125 }
126
127 static gboolean
128 remote_proxy_remove_by_node(gpointer key, gpointer value, gpointer user_data)
129 {
130 remote_proxy_t *proxy = value;
131 const char *node_name = user_data;
132
133 if (pcmk__str_eq(node_name, proxy->node_name, pcmk__str_casei)) {
134 return TRUE;
135 }
136
137 return FALSE;
138 }
139
140 static remote_proxy_t *
141 find_connected_proxy_by_node(const char * node_name)
142 {
143 GHashTableIter gIter;
144 remote_proxy_t *proxy = NULL;
145
146 CRM_CHECK(proxy_table != NULL, return NULL);
147
148 g_hash_table_iter_init(&gIter, proxy_table);
149
150 while (g_hash_table_iter_next(&gIter, NULL, (gpointer *) &proxy)) {
151 if (proxy->source
152 && pcmk__str_eq(node_name, proxy->node_name, pcmk__str_casei)) {
153 return proxy;
154 }
155 }
156
157 return NULL;
158 }
159
160 static void
161 remote_proxy_disconnect_by_node(const char * node_name)
162 {
163 remote_proxy_t *proxy = NULL;
164
165 CRM_CHECK(proxy_table != NULL, return);
166
167 while ((proxy = find_connected_proxy_by_node(node_name)) != NULL) {
168 /* mainloop_del_ipc_client() eventually calls remote_proxy_disconnected()
169 * , which removes the entry from proxy_table.
170 * Do not do this in a g_hash_table_iter_next() loop. */
171 if (proxy->source) {
172 mainloop_del_ipc_client(proxy->source);
173 }
174 }
175 }
176
177 static void
178 internal_lrm_state_destroy(gpointer data)
179 {
180 lrm_state_t *lrm_state = data;
181
|
(1) Event path: |
Condition "!lrm_state", taking false branch. |
182 if (!lrm_state) {
183 return;
184 }
185
186 /* Rather than directly remove the recorded proxy entries from proxy_table,
187 * make sure any connected proxies get disconnected. So that
188 * remote_proxy_disconnected() will be called and as well remove the
189 * entries from proxy_table.
190 */
191 remote_proxy_disconnect_by_node(lrm_state->node_name);
192
|
(2) Event path: |
Switch case default. |
|
(3) Event path: |
Condition "trace_cs == NULL", taking true branch. |
|
(4) Event path: |
Condition "crm_is_callsite_active(trace_cs, _level, 0)", taking false branch. |
|
(5) Event path: |
Breaking from switch. |
193 pcmk__trace("Destroying proxy table %s with %u members",
194 lrm_state->node_name, g_hash_table_size(proxy_table));
195 // Just in case there's still any leftovers in proxy_table
196 g_hash_table_foreach_remove(proxy_table, remote_proxy_remove_by_node, (char *) lrm_state->node_name);
197 remote_ra_cleanup(lrm_state);
198 lrmd_api_delete(lrm_state->conn);
199
|
(6) Event path: |
Condition "_p", taking true branch. |
200 g_clear_pointer(&lrm_state->rsc_info_cache, g_hash_table_destroy);
|
CID (unavailable; MK=e869f4c7ad785dcbc90410a40d8872c1) (#2 of 4): Inconsistent C union access (INCONSISTENT_UNION_ACCESS): |
|
(7) Event assign_union_field: |
The union field "in" of "_pp" is written. |
|
(8) Event inconsistent_union_field_access: |
In "_pp.out", the union field used: "out" is inconsistent with the field most recently stored: "in". |
201 g_clear_pointer(&lrm_state->resource_history, g_hash_table_destroy);
202 g_clear_pointer(&lrm_state->deletion_ops, g_hash_table_destroy);
203 g_clear_pointer(&lrm_state->active_ops, g_hash_table_destroy);
204
205 metadata_cache_free(lrm_state->metadata_cache);
206
207 free((char *)lrm_state->node_name);
208 free(lrm_state);
209 }
210
211 void
212 lrm_state_reset_tables(lrm_state_t * lrm_state, gboolean reset_metadata)
213 {
214 if (lrm_state->resource_history) {
215 pcmk__trace("Resetting resource history cache with %u members",
216 g_hash_table_size(lrm_state->resource_history));
217 g_hash_table_remove_all(lrm_state->resource_history);
218 }
219 if (lrm_state->deletion_ops) {
220 pcmk__trace("Resetting deletion operations cache with %u members",
221 g_hash_table_size(lrm_state->deletion_ops));
222 g_hash_table_remove_all(lrm_state->deletion_ops);
223 }
224 if (lrm_state->active_ops != NULL) {
225 pcmk__trace("Resetting active operations cache with %u members",
226 g_hash_table_size(lrm_state->active_ops));
227 g_hash_table_remove_all(lrm_state->active_ops);
228 }
229 if (lrm_state->rsc_info_cache) {
230 pcmk__trace("Resetting resource information cache with %u members",
231 g_hash_table_size(lrm_state->rsc_info_cache));
232 g_hash_table_remove_all(lrm_state->rsc_info_cache);
233 }
234 if (reset_metadata) {
235 metadata_cache_reset(lrm_state->metadata_cache);
236 }
237 }
238
239 void
240 lrm_state_init_local(void)
241 {
242 if (lrm_state_table != NULL) {
243 return;
244 }
245
246 lrm_state_table = pcmk__strikey_table(NULL, internal_lrm_state_destroy);
247 proxy_table = pcmk__strikey_table(NULL, remote_proxy_free);
248 }
249
// Free the global state and proxy tables (and all their entries)
void
lrm_state_destroy_all(void)
{
    /* Order matters: internal_lrm_state_destroy() (the lrm_state_table entry
     * destructor) reads proxy_table, so lrm_state_table must go first.
     */
    g_clear_pointer(&lrm_state_table, g_hash_table_destroy);
    g_clear_pointer(&proxy_table, g_hash_table_destroy);
}
256
257 /*!
258 * \internal
259 * \brief Get executor state object
260 *
261 * \param[in] node_name Get executor state for this node (local node if NULL)
262 * \param[in] create If true, create executor state if it doesn't exist
263 *
264 * \return Executor state object for \p node_name
265 */
266 lrm_state_t *
267 controld_get_executor_state(const char *node_name, bool create)
268 {
269 lrm_state_t *state = NULL;
270
271 if ((node_name == NULL) && (controld_globals.cluster != NULL)) {
272 node_name = controld_globals.cluster->priv->node_name;
273 }
274 if ((node_name == NULL) || (lrm_state_table == NULL)) {
275 return NULL;
276 }
277
278 state = g_hash_table_lookup(lrm_state_table, node_name);
279 if ((state == NULL) && create) {
280 state = lrm_state_create(node_name);
281 }
282 return state;
283 }
284
285 /* @TODO the lone caller just needs to iterate over the values, so replace this
286 * with a g_hash_table_foreach() wrapper instead
287 */
288 GList *
289 lrm_state_get_list(void)
290 {
291 if (lrm_state_table == NULL) {
292 return NULL;
293 }
294 return g_hash_table_get_values(lrm_state_table);
295 }
296
297 void
298 lrm_state_disconnect_only(lrm_state_t * lrm_state)
299 {
300 guint removed = 0;
301
302 if (!lrm_state->conn) {
303 return;
304 }
305 pcmk__trace("Disconnecting %s", lrm_state->node_name);
306
307 remote_proxy_disconnect_by_node(lrm_state->node_name);
308
309 ((lrmd_t *) lrm_state->conn)->cmds->disconnect(lrm_state->conn);
310
311 if (!pcmk__is_set(controld_globals.fsa_input_register, R_SHUTDOWN)) {
312 removed = g_hash_table_foreach_remove(lrm_state->active_ops,
313 fail_pending_op, lrm_state);
314 pcmk__trace("Synthesized %u operation failures for %s", removed,
315 lrm_state->node_name);
316 }
317 }
318
319 void
320 lrm_state_disconnect(lrm_state_t * lrm_state)
321 {
322 if (!lrm_state->conn) {
323 return;
324 }
325
326 lrm_state_disconnect_only(lrm_state);
327
328 lrmd_api_delete(lrm_state->conn);
329 lrm_state->conn = NULL;
330 }
331
332 int
333 lrm_state_is_connected(lrm_state_t * lrm_state)
334 {
335 if (!lrm_state->conn) {
336 return FALSE;
337 }
338 return ((lrmd_t *) lrm_state->conn)->cmds->is_connected(lrm_state->conn);
339 }
340
341 int
342 lrm_state_poke_connection(lrm_state_t * lrm_state)
343 {
344
345 if (!lrm_state->conn) {
346 return -ENOTCONN;
347 }
348 return ((lrmd_t *) lrm_state->conn)->cmds->poke_connection(lrm_state->conn);
349 }
350
351 // \return Standard Pacemaker return code
352 int
353 controld_connect_local_executor(lrm_state_t *lrm_state)
354 {
355 int rc = pcmk_rc_ok;
356
357 if (lrm_state->conn == NULL) {
358 lrmd_t *api = NULL;
359
360 rc = lrmd__new(&api, NULL, NULL, 0);
361 if (rc != pcmk_rc_ok) {
362 return rc;
363 }
364 api->cmds->set_callback(api, lrm_op_callback);
365 lrm_state->conn = api;
366 }
367
368 rc = ((lrmd_t *) lrm_state->conn)->cmds->connect(lrm_state->conn,
369 CRM_SYSTEM_CRMD, NULL);
370 rc = pcmk_legacy2rc(rc);
371
372 if (rc == pcmk_rc_ok) {
373 lrm_state->num_lrm_register_fails = 0;
374 } else {
375 lrm_state->num_lrm_register_fails++;
376 }
377 return rc;
378 }
379
380 static remote_proxy_t *
381 crmd_remote_proxy_new(lrmd_t *lrmd, const char *node_name, const char *session_id, const char *channel)
382 {
383 struct ipc_client_callbacks proxy_callbacks = {
384 .dispatch = remote_proxy_dispatch,
385 .destroy = remote_proxy_disconnected
386 };
387 remote_proxy_t *proxy = remote_proxy_new(lrmd, &proxy_callbacks, node_name,
388 session_id, channel);
389 return proxy;
390 }
391
392 gboolean
393 crmd_is_proxy_session(const char *session)
394 {
395 return g_hash_table_lookup(proxy_table, session) ? TRUE : FALSE;
396 }
397
398 void
399 crmd_proxy_send(const char *session, xmlNode *msg)
400 {
401 remote_proxy_t *proxy = g_hash_table_lookup(proxy_table, session);
402 lrm_state_t *lrm_state = NULL;
403
404 if (!proxy) {
405 return;
406 }
407 pcmk__log_xml_trace(msg, "to-proxy");
408 lrm_state = controld_get_executor_state(proxy->node_name, false);
409 if (lrm_state) {
410 pcmk__trace("Sending event to %.8s on %s", proxy->session_id,
411 proxy->node_name);
412 remote_proxy_relay_event(proxy, msg);
413 }
414 }
415
416 static void
417 crmd_proxy_dispatch(const char *session, xmlNode *msg)
418 {
419 pcmk__trace("Processing proxied IPC message from session %s", session);
420 pcmk__log_xml_trace(msg, "controller[inbound]");
421 pcmk__xe_set(msg, PCMK__XA_CRM_SYS_FROM, session);
422 if (controld_authorize_ipc_message(msg, NULL, session)) {
423 route_message(C_IPC_MESSAGE, msg);
424 }
425 controld_trigger_fsa();
426 }
427
/*!
 * \internal
 * \brief CIB query callback: send cluster options to a remote peer to validate
 *
 * Unpacks PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS from the queried crm_config XML
 * and passes the resulting table to lrmd__validate_remote_settings() on the
 * executor connection given as \p user_data.
 *
 * \param[in]     msg        Unused
 * \param[in]     call_id    CIB call ID (for logging)
 * \param[in]     rc         CIB call result (legacy code; pcmk_ok on success)
 * \param[in,out] output     Queried XML (may be NULL)
 * \param[in,out] user_data  lrmd_t connection to validate settings against
 */
static void
remote_config_check(xmlNode *msg, int call_id, int rc, xmlNode *output,
                    void *user_data)
{
    lrmd_t *lrmd = user_data;
    GHashTable *config_hash = NULL;
    crm_time_t *now = NULL;
    pcmk_rule_input_t rule_input = { NULL, };

    if (rc != pcmk_ok) {
        pcmk__err("Query resulted in an error: %s", pcmk_strerror(rc));

        if ((rc == -EACCES) || (rc == -pcmk_err_schema_validation)) {
            pcmk__err("The cluster is misconfigured - shutting down and "
                      "staying down");
        }

        return;
    }

    config_hash = pcmk__strkey_table(free, free);
    // Rules in the options are evaluated against the current time
    now = crm_time_new(NULL);
    rule_input.now = now;

    pcmk__debug("Call %d : Parsing CIB options", call_id);
    if (output != NULL) {
        pcmk__unpack_nvpair_blocks(output, PCMK_XE_CLUSTER_PROPERTY_SET,
                                   PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS,
                                   &rule_input, config_hash, NULL, output->doc);
    }

    // Now send it to the remote peer
    lrmd__validate_remote_settings(lrmd, config_hash);

    g_hash_table_destroy(config_hash);
    crm_time_free(now);
}
465
/*!
 * \internal
 * \brief Handle a proxy message from a remote executor connection
 *
 * Dispatches on the message's IPC operation:
 * - LRMD_IPC_OP_NEW: create a proxy for the new session and (for non-guest
 *   nodes) query cluster options from the CIB for remote-side validation
 * - LRMD_IPC_OP_SHUTDOWN_REQ: ack or nack the remote node's shutdown request
 *   depending on maintenance mode
 * - LRMD_IPC_OP_REQUEST to a local (controller) session: handle directly
 *   rather than relaying over IPC to ourselves
 * - anything else: hand off to the generic remote_proxy_cb()
 *
 * \param[in,out] lrmd      Remote executor connection the message came from
 * \param[in,out] userdata  lrm_state_t for the remote node
 * \param[in,out] msg       Proxy message XML
 */
static void
crmd_remote_proxy_cb(lrmd_t *lrmd, void *userdata, xmlNode *msg)
{
    lrm_state_t *lrm_state = userdata;
    const char *session = pcmk__xe_get(msg, PCMK__XA_LRMD_IPC_SESSION);
    remote_proxy_t *proxy = g_hash_table_lookup(proxy_table, session);

    const char *op = pcmk__xe_get(msg, PCMK__XA_LRMD_IPC_OP);
    if (pcmk__str_eq(op, LRMD_IPC_OP_NEW, pcmk__str_casei)) {
        const char *channel = pcmk__xe_get(msg, PCMK__XA_LRMD_IPC_SERVER);

        proxy = crmd_remote_proxy_new(lrmd, lrm_state->node_name, session,
                                      channel);
        if (!remote_ra_controlling_guest(lrm_state)) {
            if (proxy != NULL) {
                cib_t *cib_conn = controld_globals.cib_conn;

                /* Look up PCMK_OPT_FENCING_WATCHDOG_TIMEOUT and send to the
                 * remote peer for validation
                 */
                int rc = cib_conn->cmds->query(cib_conn, PCMK_XE_CRM_CONFIG,
                                               NULL, cib_none);
                cib_conn->cmds->register_callback_full(cib_conn, rc, 10, FALSE,
                                                       lrmd,
                                                       "remote_config_check",
                                                       remote_config_check,
                                                       NULL);
            }
        } else {
            pcmk__debug("Skipping remote_config_check for guest-nodes");
        }

    } else if (pcmk__str_eq(op, LRMD_IPC_OP_SHUTDOWN_REQ, pcmk__str_casei)) {
        char *now_s = NULL;

        pcmk__notice("%s requested shutdown of its remote connection",
                     lrm_state->node_name);

        if (!remote_ra_is_in_maintenance(lrm_state)) {
            // Set the node's shutdown attribute so the scheduler reacts
            now_s = pcmk__ttoa(time(NULL));
            update_attrd(lrm_state->node_name, PCMK__NODE_ATTR_SHUTDOWN, now_s,
                         true);
            free(now_s);

            remote_proxy_ack_shutdown(lrmd);

            pcmk__warn("Reconnection attempts to %s may result in failures "
                       "that must be cleared",
                       lrm_state->node_name);
        } else {
            // In maintenance mode, refuse an ordered shutdown
            remote_proxy_nack_shutdown(lrmd);

            pcmk__notice("Remote resource for %s is not managed so no ordered "
                         "shutdown happening",
                         lrm_state->node_name);
        }
        return;

    } else if (pcmk__str_eq(op, LRMD_IPC_OP_REQUEST, pcmk__str_casei)
               && proxy && proxy->is_local) {
        /* This is for the controller, which we are, so don't try
         * to send to ourselves over IPC -- do it directly.
         */
        uint32_t flags = 0U;
        int rc = pcmk_rc_ok;
        xmlNode *wrapper = pcmk__xe_first_child(msg, PCMK__XE_LRMD_IPC_MSG,
                                                NULL, NULL);
        xmlNode *request = pcmk__xe_first_child(wrapper, NULL, NULL, NULL);

        CRM_CHECK(request != NULL, return);
        CRM_CHECK(lrm_state->node_name, return);
        // Requests from remote nodes are subject to the remote ACL role
        pcmk__xe_set(request, PCMK_XE_ACL_ROLE, "pacemaker-remote");
        pcmk__update_acl_user(request, PCMK__XA_LRMD_IPC_USER,
                              lrm_state->node_name);

        /* Pacemaker Remote nodes don't know their own names (as known to the
         * cluster). When getting a node info request with no name or ID, add
         * the name, so we don't return info for ourselves instead of the
         * Pacemaker Remote node.
         */
        if (pcmk__str_eq(pcmk__xe_get(request, PCMK__XA_CRM_TASK),
                         CRM_OP_NODE_INFO, pcmk__str_none)) {
            int node_id = 0;

            pcmk__xe_get_int(request, PCMK_XA_ID, &node_id);
            if ((node_id <= 0)
                && (pcmk__xe_get(request, PCMK_XA_UNAME) == NULL)) {
                pcmk__xe_set(request, PCMK_XA_UNAME, lrm_state->node_name);
            }
        }

        crmd_proxy_dispatch(session, request);

        rc = pcmk__xe_get_flags(msg, PCMK__XA_LRMD_IPC_MSG_FLAGS, &flags, 0U);
        if (rc != pcmk_rc_ok) {
            pcmk__warn("Couldn't parse controller flags from remote request: "
                       "%s",
                       pcmk_rc_str(rc));
        }
        // If the client expects a response, relay an ack back to the session
        if (pcmk__is_set(flags, crm_ipc_client_response)) {
            int msg_id = 0;
            xmlNode *op_reply = pcmk__xe_create(NULL, PCMK__XE_ACK);

            pcmk__xe_set(op_reply, PCMK_XA_FUNCTION, __func__);
            pcmk__xe_set_int(op_reply, PCMK__XA_LINE, __LINE__);

            pcmk__xe_get_int(msg, PCMK__XA_LRMD_IPC_MSG_ID, &msg_id);
            remote_proxy_relay_response(proxy, op_reply, msg_id);

            pcmk__xml_free(op_reply);
        }

    } else {
        remote_proxy_cb(lrmd, lrm_state->node_name, msg);
    }
}
580
581
582 // \return Standard Pacemaker return code
583 int
584 controld_connect_remote_executor(lrm_state_t *lrm_state, const char *server,
585 int port, int timeout_ms)
586 {
587 int rc = pcmk_rc_ok;
588
589 if (lrm_state->conn == NULL) {
590 lrmd_t *api = NULL;
591
592 rc = lrmd__new(&api, lrm_state->node_name, server, port);
593 if (rc != pcmk_rc_ok) {
594 pcmk__warn("Pacemaker Remote connection to %s:%s failed: %s "
595 QB_XS " rc=%d",
596 server, port, pcmk_rc_str(rc), rc);
597
598 return rc;
599 }
600 lrm_state->conn = api;
601 api->cmds->set_callback(api, remote_lrm_op_callback);
602 lrmd_internal_set_proxy_callback(api, lrm_state, crmd_remote_proxy_cb);
603 }
604
605 pcmk__trace("Initiating remote connection to %s:%d with timeout %dms",
606 server, port, timeout_ms);
607 rc = ((lrmd_t *) lrm_state->conn)->cmds->connect_async(lrm_state->conn,
608 lrm_state->node_name,
609 timeout_ms);
610 if (rc == pcmk_ok) {
611 lrm_state->num_lrm_register_fails = 0;
612 } else {
613 lrm_state->num_lrm_register_fails++; // Ignored for remote connections
614 }
615 return pcmk_legacy2rc(rc);
616 }
617
618 int
619 lrm_state_get_metadata(lrm_state_t * lrm_state,
620 const char *class,
621 const char *provider,
622 const char *agent, char **output, enum lrmd_call_options options)
623 {
624 lrmd_key_value_t *params = NULL;
625
626 if (!lrm_state->conn) {
627 return -ENOTCONN;
628 }
629
630 /* Add the node name to the environment, as is done with normal resource
631 * action calls. Meta-data calls shouldn't need it, but some agents are
632 * written with an ocf_local_nodename call at the beginning regardless of
633 * action. Without the environment variable, the agent would try to contact
634 * the controller to get the node name -- but the controller would be
635 * blocking on the synchronous meta-data call.
636 *
637 * At this point, we have to assume that agents are unlikely to make other
638 * calls that require the controller, such as crm_node --quorum or
639 * --cluster-id.
640 *
641 * @TODO Make meta-data calls asynchronous. (This will be part of a larger
642 * project to make meta-data calls via the executor rather than directly.)
643 */
644 params = lrmd_key_value_add(params, CRM_META "_" PCMK__META_ON_NODE,
645 lrm_state->node_name);
646
647 return ((lrmd_t *) lrm_state->conn)->cmds->get_metadata_params(lrm_state->conn,
648 class, provider, agent, output, options, params);
649 }
650
651 int
652 lrm_state_cancel(lrm_state_t *lrm_state, const char *rsc_id, const char *action,
653 guint interval_ms)
654 {
655 if (!lrm_state->conn) {
656 return -ENOTCONN;
657 }
658
659 /* Figure out a way to make this async?
660 * NOTICE: Currently it's synced and directly acknowledged in
661 * controld_invoke_execd().
662 */
663 if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) {
664 return remote_ra_cancel(lrm_state, rsc_id, action, interval_ms);
665 }
666 return ((lrmd_t *) lrm_state->conn)->cmds->cancel(lrm_state->conn, rsc_id,
667 action, interval_ms);
668 }
669
670 lrmd_rsc_info_t *
671 lrm_state_get_rsc_info(lrm_state_t * lrm_state, const char *rsc_id, enum lrmd_call_options options)
672 {
673 lrmd_rsc_info_t *rsc = NULL;
674
675 if (!lrm_state->conn) {
676 return NULL;
677 }
678 if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) {
679 return remote_ra_get_rsc_info(lrm_state, rsc_id);
680 }
681
682 rsc = g_hash_table_lookup(lrm_state->rsc_info_cache, rsc_id);
683 if (rsc == NULL) {
684 /* only contact the lrmd if we don't already have a cached rsc info */
685 rsc = ((lrmd_t *) lrm_state->conn)->cmds->get_rsc_info(lrm_state->conn, rsc_id, options);
686 if (rsc == NULL) {
687 return NULL;
688 }
689 /* cache the result */
690 g_hash_table_insert(lrm_state->rsc_info_cache, rsc->id, rsc);
691 }
692
693 return lrmd_copy_rsc_info(rsc);
694
695 }
696
697 /*!
698 * \internal
699 * \brief Initiate a resource agent action
700 *
701 * \param[in,out] lrm_state Executor state object
702 * \param[in] rsc_id ID of resource for action
703 * \param[in] action Action to execute
704 * \param[in] userdata String to copy and pass to execution callback
705 * \param[in] interval_ms Action interval (in milliseconds)
706 * \param[in] timeout_ms Action timeout (in milliseconds)
707 * \param[in] start_delay_ms Delay (in ms) before initiating action
708 * \param[in] parameters Hash table of resource parameters
709 * \param[out] call_id Where to store call ID on success
710 *
711 * \return Standard Pacemaker return code
712 */
713 int
714 controld_execute_resource_agent(lrm_state_t *lrm_state, const char *rsc_id,
715 const char *action, const char *userdata,
716 guint interval_ms, int timeout_ms,
717 int start_delay_ms, GHashTable *parameters,
718 int *call_id)
719 {
720 int rc = pcmk_rc_ok;
721 lrmd_key_value_t *params = NULL;
722
723 if (lrm_state->conn == NULL) {
724 return ENOTCONN;
725 }
726
727 // Convert parameters from hash table to list
728 if (parameters != NULL) {
729 const char *key = NULL;
730 const char *value = NULL;
731 GHashTableIter iter;
732
733 g_hash_table_iter_init(&iter, parameters);
734 while (g_hash_table_iter_next(&iter, (gpointer *) &key,
735 (gpointer *) &value)) {
736 params = lrmd_key_value_add(params, key, value);
737 }
738 }
739
740 if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) {
741 rc = controld_execute_remote_agent(lrm_state, rsc_id, action,
742 userdata, interval_ms, timeout_ms,
743 start_delay_ms, params, call_id);
744
745 } else {
746 rc = ((lrmd_t *) lrm_state->conn)->cmds->exec(lrm_state->conn, rsc_id,
747 action, userdata,
748 interval_ms, timeout_ms,
749 start_delay_ms,
750 lrmd_opt_notify_changes_only,
751 params);
752 if (rc < 0) {
753 rc = pcmk_legacy2rc(rc);
754 } else {
755 *call_id = rc;
756 rc = pcmk_rc_ok;
757 }
758 }
759 return rc;
760 }
761
762 int
763 lrm_state_register_rsc(lrm_state_t * lrm_state,
764 const char *rsc_id,
765 const char *class,
766 const char *provider, const char *agent, enum lrmd_call_options options)
767 {
768 lrmd_t *conn = (lrmd_t *) lrm_state->conn;
769
770 if (conn == NULL) {
771 return -ENOTCONN;
772 }
773
774 if (is_remote_lrmd_ra(agent, provider, NULL)) {
775 return controld_get_executor_state(rsc_id, true)? pcmk_ok : -EINVAL;
776 }
777
778 /* @TODO Implement an asynchronous version of this (currently a blocking
779 * call to the lrmd).
780 */
781 return conn->cmds->register_rsc(lrm_state->conn, rsc_id, class, provider,
782 agent, options);
783 }
784
785 int
786 lrm_state_unregister_rsc(lrm_state_t * lrm_state,
787 const char *rsc_id, enum lrmd_call_options options)
788 {
789 if (!lrm_state->conn) {
790 return -ENOTCONN;
791 }
792
793 if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) {
794 g_hash_table_remove(lrm_state_table, rsc_id);
795 return pcmk_ok;
796 }
797
798 g_hash_table_remove(lrm_state->rsc_info_cache, rsc_id);
799
800 /* @TODO Optimize this ... this function is a blocking round trip from
801 * client to daemon. The controld_execd_state.c code path that uses this
802 * function should always treat it as an async operation. The executor API
803 * should make an async version available.
804 */
805 return ((lrmd_t *) lrm_state->conn)->cmds->unregister_rsc(lrm_state->conn, rsc_id, options);
806 }
807