1 /*
2 * Copyright 2012-2026 the Pacemaker project contributors
3 *
4 * The version control history for this file may have further details.
5 *
6 * This source code is licensed under the GNU General Public License version 2
7 * or later (GPLv2+) WITHOUT ANY WARRANTY.
8 */
9
10 #include <crm_internal.h>
11
12 #include <errno.h>
13 #include <stdbool.h>
14
15 #include <crm/crm.h>
16 #include <crm/common/iso8601.h>
17 #include <crm/common/xml.h>
18 #include <crm/lrmd_internal.h>
19
20 #include <pacemaker-internal.h>
21 #include <pacemaker-controld.h>
22
23 static GHashTable *lrm_state_table = NULL;
24 extern GHashTable *proxy_table;
25 int lrmd_internal_proxy_send(lrmd_t * lrmd, xmlNode *msg);
26 void lrmd_internal_set_proxy_callback(lrmd_t * lrmd, void *userdata, void (*callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg));
27
28 static void
29 free_rsc_info(gpointer value)
30 {
31 lrmd_rsc_info_t *rsc_info = value;
32
33 lrmd_free_rsc_info(rsc_info);
34 }
35
36 static void
37 free_deletion_op(gpointer value)
38 {
39 struct pending_deletion_op_s *op = value;
40
41 free(op->rsc);
42 delete_ha_msg_input(op->input);
43 free(op);
44 }
45
46 static void
47 free_recurring_op(gpointer value)
48 {
49 active_op_t *op = value;
50
51 free(op->user_data);
52 free(op->rsc_id);
53 free(op->op_type);
54 free(op->op_key);
55 g_clear_pointer(&op->params, g_hash_table_destroy);
56 free(op);
57 }
58
59 static gboolean
60 fail_pending_op(gpointer key, gpointer value, gpointer user_data)
61 {
62 lrmd_event_data_t event = { 0, };
63 lrm_state_t *lrm_state = user_data;
64 active_op_t *op = value;
65
66 pcmk__trace("Pre-emptively failing " PCMK__OP_FMT " on %s (call=%s, %s)",
67 op->rsc_id, op->op_type, op->interval_ms,
68 lrm_state->node_name, (const char *) key, op->user_data);
69
70 event.type = lrmd_event_exec_complete;
71 event.rsc_id = op->rsc_id;
72 event.op_type = op->op_type;
73 event.user_data = op->user_data;
74 event.timeout = 0;
75 event.interval_ms = op->interval_ms;
76 lrmd__set_result(&event, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_NOT_CONNECTED,
77 "Action was pending when executor connection was dropped");
78 event.t_run = op->start_time;
79 event.t_rcchange = op->start_time;
80
81 event.call_id = op->call_id;
82 event.remote_nodename = lrm_state->node_name;
83 event.params = op->params;
84
85 process_lrm_event(lrm_state, &event, op, NULL);
86 lrmd__reset_result(&event);
87 return TRUE;
88 }
89
90 gboolean
91 lrm_state_is_local(lrm_state_t *lrm_state)
92 {
93 return (lrm_state != NULL) && controld_is_local_node(lrm_state->node_name);
94 }
95
96 /*!
97 * \internal
98 * \brief Create executor state entry for a node and add it to the state table
99 *
100 * \param[in] node_name Node to create entry for
101 *
102 * \return Newly allocated executor state object initialized for \p node_name
103 */
104 static lrm_state_t *
105 lrm_state_create(const char *node_name)
106 {
107 lrm_state_t *state = NULL;
108
109 if (!node_name) {
110 pcmk__err("No node name given for lrm state object");
111 return NULL;
112 }
113
114 state = pcmk__assert_alloc(1, sizeof(lrm_state_t));
115
116 state->node_name = pcmk__str_copy(node_name);
117 state->rsc_info_cache = pcmk__strkey_table(NULL, free_rsc_info);
118 state->deletion_ops = pcmk__strkey_table(free, free_deletion_op);
119 state->active_ops = pcmk__strkey_table(free, free_recurring_op);
120 state->resource_history = pcmk__strkey_table(NULL, history_free);
121 state->metadata_cache = metadata_cache_new();
122
123 g_hash_table_insert(lrm_state_table, (char *)state->node_name, state);
124 return state;
125 }
126
127 static gboolean
128 remote_proxy_remove_by_node(gpointer key, gpointer value, gpointer user_data)
129 {
130 remote_proxy_t *proxy = value;
131 const char *node_name = user_data;
132
133 if (pcmk__str_eq(node_name, proxy->node_name, pcmk__str_casei)) {
134 return TRUE;
135 }
136
137 return FALSE;
138 }
139
140 static remote_proxy_t *
141 find_connected_proxy_by_node(const char * node_name)
142 {
143 GHashTableIter gIter;
144 remote_proxy_t *proxy = NULL;
145
146 CRM_CHECK(proxy_table != NULL, return NULL);
147
148 g_hash_table_iter_init(&gIter, proxy_table);
149
150 while (g_hash_table_iter_next(&gIter, NULL, (gpointer *) &proxy)) {
151 if (proxy->source
152 && pcmk__str_eq(node_name, proxy->node_name, pcmk__str_casei)) {
153 return proxy;
154 }
155 }
156
157 return NULL;
158 }
159
160 static void
161 remote_proxy_disconnect_by_node(const char * node_name)
162 {
163 remote_proxy_t *proxy = NULL;
164
165 CRM_CHECK(proxy_table != NULL, return);
166
167 while ((proxy = find_connected_proxy_by_node(node_name)) != NULL) {
168 /* mainloop_del_ipc_client() eventually calls remote_proxy_disconnected()
169 * , which removes the entry from proxy_table.
170 * Do not do this in a g_hash_table_iter_next() loop. */
171 if (proxy->source) {
172 mainloop_del_ipc_client(proxy->source);
173 }
174 }
175 }
176
177 static void
178 internal_lrm_state_destroy(gpointer data)
179 {
180 lrm_state_t *lrm_state = data;
181
|
(1) Event path: |
Condition "!lrm_state", taking false branch. |
182 if (!lrm_state) {
183 return;
184 }
185
186 /* Rather than directly remove the recorded proxy entries from proxy_table,
187 * make sure any connected proxies get disconnected. So that
188 * remote_proxy_disconnected() will be called and as well remove the
189 * entries from proxy_table.
190 */
191 remote_proxy_disconnect_by_node(lrm_state->node_name);
192
|
(2) Event path: |
Switch case default. |
|
(3) Event path: |
Condition "trace_cs == NULL", taking true branch. |
|
(4) Event path: |
Condition "crm_is_callsite_active(trace_cs, _level, 0)", taking false branch. |
|
(5) Event path: |
Breaking from switch. |
193 pcmk__trace("Destroying proxy table %s with %u members",
194 lrm_state->node_name, g_hash_table_size(proxy_table));
195 // Just in case there's still any leftovers in proxy_table
196 g_hash_table_foreach_remove(proxy_table, remote_proxy_remove_by_node, (char *) lrm_state->node_name);
197 remote_ra_cleanup(lrm_state);
198 lrmd_api_delete(lrm_state->conn);
199
|
(6) Event path: |
Condition "_p", taking true branch. |
200 g_clear_pointer(&lrm_state->rsc_info_cache, g_hash_table_destroy);
|
(7) Event path: |
Condition "_p", taking true branch. |
201 g_clear_pointer(&lrm_state->resource_history, g_hash_table_destroy);
|
CID (unavailable; MK=e869f4c7ad785dcbc90410a40d8872c1) (#3 of 4): Inconsistent C union access (INCONSISTENT_UNION_ACCESS): |
|
(8) Event assign_union_field: |
The union field "in" of "_pp" is written. |
|
(9) Event inconsistent_union_field_access: |
In "_pp.out", the union field used: "out" is inconsistent with the field most recently stored: "in". |
202 g_clear_pointer(&lrm_state->deletion_ops, g_hash_table_destroy);
203 g_clear_pointer(&lrm_state->active_ops, g_hash_table_destroy);
204
205 metadata_cache_free(lrm_state->metadata_cache);
206
207 free((char *)lrm_state->node_name);
208 free(lrm_state);
209 }
210
/*!
 * \internal
 * \brief Empty an executor state object's caches without destroying them
 *
 * \param[in,out] lrm_state       Executor state whose caches should be emptied
 * \param[in]     reset_metadata  If TRUE, also reset the metadata cache
 */
void
lrm_state_reset_tables(lrm_state_t * lrm_state, gboolean reset_metadata)
{
    if (lrm_state->resource_history) {
        pcmk__trace("Resetting resource history cache with %u members",
                    g_hash_table_size(lrm_state->resource_history));
        g_hash_table_remove_all(lrm_state->resource_history);
    }
    if (lrm_state->deletion_ops) {
        pcmk__trace("Resetting deletion operations cache with %u members",
                    g_hash_table_size(lrm_state->deletion_ops));
        g_hash_table_remove_all(lrm_state->deletion_ops);
    }
    if (lrm_state->active_ops != NULL) {
        pcmk__trace("Resetting active operations cache with %u members",
                    g_hash_table_size(lrm_state->active_ops));
        g_hash_table_remove_all(lrm_state->active_ops);
    }
    if (lrm_state->rsc_info_cache) {
        pcmk__trace("Resetting resource information cache with %u members",
                    g_hash_table_size(lrm_state->rsc_info_cache));
        g_hash_table_remove_all(lrm_state->rsc_info_cache);
    }
    if (reset_metadata) {
        metadata_cache_reset(lrm_state->metadata_cache);
    }
}
238
239 void
240 lrm_state_init_local(void)
241 {
242 if (lrm_state_table != NULL) {
243 return;
244 }
245
246 lrm_state_table = pcmk__strikey_table(NULL, internal_lrm_state_destroy);
247 proxy_table = pcmk__strikey_table(NULL, remote_proxy_free);
248 }
249
250 void
251 lrm_state_destroy_all(void)
252 {
253 g_clear_pointer(&lrm_state_table, g_hash_table_destroy);
254 g_clear_pointer(&proxy_table, g_hash_table_destroy);
255 }
256
257 /*!
258 * \internal
259 * \brief Get executor state object
260 *
261 * \param[in] node_name Get executor state for this node (local node if NULL)
262 * \param[in] create If true, create executor state if it doesn't exist
263 *
264 * \return Executor state object for \p node_name
265 */
266 lrm_state_t *
267 controld_get_executor_state(const char *node_name, bool create)
268 {
269 lrm_state_t *state = NULL;
270
271 if ((node_name == NULL) && (controld_globals.cluster != NULL)) {
272 node_name = controld_globals.cluster->priv->node_name;
273 }
274 if ((node_name == NULL) || (lrm_state_table == NULL)) {
275 return NULL;
276 }
277
278 state = g_hash_table_lookup(lrm_state_table, node_name);
279 if ((state == NULL) && create) {
280 state = lrm_state_create(node_name);
281 }
282 return state;
283 }
284
285 /* @TODO the lone caller just needs to iterate over the values, so replace this
286 * with a g_hash_table_foreach() wrapper instead
287 */
288 GList *
289 lrm_state_get_list(void)
290 {
291 if (lrm_state_table == NULL) {
292 return NULL;
293 }
294 return g_hash_table_get_values(lrm_state_table);
295 }
296
297 void
298 lrm_state_disconnect_only(lrm_state_t * lrm_state)
299 {
300 guint removed = 0;
301
302 if (!lrm_state->conn) {
303 return;
304 }
305 pcmk__trace("Disconnecting %s", lrm_state->node_name);
306
307 remote_proxy_disconnect_by_node(lrm_state->node_name);
308
309 ((lrmd_t *) lrm_state->conn)->cmds->disconnect(lrm_state->conn);
310
311 if (!pcmk__is_set(controld_globals.fsa_input_register, R_SHUTDOWN)) {
312 removed = g_hash_table_foreach_remove(lrm_state->active_ops,
313 fail_pending_op, lrm_state);
314 pcmk__trace("Synthesized %u operation failures for %s", removed,
315 lrm_state->node_name);
316 }
317 }
318
319 void
320 lrm_state_disconnect(lrm_state_t * lrm_state)
321 {
322 if (!lrm_state->conn) {
323 return;
324 }
325
326 lrm_state_disconnect_only(lrm_state);
327
328 lrmd_api_delete(lrm_state->conn);
329 lrm_state->conn = NULL;
330 }
331
332 int
333 lrm_state_is_connected(lrm_state_t * lrm_state)
334 {
335 if (!lrm_state->conn) {
336 return FALSE;
337 }
338 return ((lrmd_t *) lrm_state->conn)->cmds->is_connected(lrm_state->conn);
339 }
340
341 int
342 lrm_state_poke_connection(lrm_state_t * lrm_state)
343 {
344
345 if (!lrm_state->conn) {
346 return -ENOTCONN;
347 }
348 return ((lrmd_t *) lrm_state->conn)->cmds->poke_connection(lrm_state->conn);
349 }
350
351 // \return Standard Pacemaker return code
352 int
353 controld_connect_local_executor(lrm_state_t *lrm_state)
354 {
355 int rc = pcmk_rc_ok;
356
357 if (lrm_state->conn == NULL) {
358 lrmd_t *api = NULL;
359
360 rc = lrmd__new(&api, NULL, NULL, 0);
361 if (rc != pcmk_rc_ok) {
362 return rc;
363 }
364 api->cmds->set_callback(api, lrm_op_callback);
365 lrm_state->conn = api;
366 }
367
368 rc = ((lrmd_t *) lrm_state->conn)->cmds->connect(lrm_state->conn,
369 CRM_SYSTEM_CRMD, NULL);
370 rc = pcmk_legacy2rc(rc);
371
372 if (rc == pcmk_rc_ok) {
373 lrm_state->num_lrm_register_fails = 0;
374 } else {
375 lrm_state->num_lrm_register_fails++;
376 }
377 return rc;
378 }
379
380 static remote_proxy_t *
381 crmd_remote_proxy_new(lrmd_t *lrmd, const char *node_name, const char *session_id, const char *channel)
382 {
383 struct ipc_client_callbacks proxy_callbacks = {
384 .dispatch = remote_proxy_dispatch,
385 .destroy = remote_proxy_disconnected
386 };
387 remote_proxy_t *proxy = remote_proxy_new(lrmd, &proxy_callbacks, node_name,
388 session_id, channel);
389 return proxy;
390 }
391
392 gboolean
393 crmd_is_proxy_session(const char *session)
394 {
395 return g_hash_table_lookup(proxy_table, session) ? TRUE : FALSE;
396 }
397
398 void
399 crmd_proxy_send(const char *session, xmlNode *msg)
400 {
401 remote_proxy_t *proxy = g_hash_table_lookup(proxy_table, session);
402 lrm_state_t *lrm_state = NULL;
403
404 if (!proxy) {
405 return;
406 }
407 pcmk__log_xml_trace(msg, "to-proxy");
408 lrm_state = controld_get_executor_state(proxy->node_name, false);
409 if (lrm_state) {
410 pcmk__trace("Sending event to %.8s on %s", proxy->session_id,
411 proxy->node_name);
412 remote_proxy_relay_event(proxy, msg);
413 }
414 }
415
/*!
 * \internal
 * \brief Handle a proxied IPC request as if it arrived via local IPC
 *
 * Stamps the message with the session it came from, routes it through the
 * normal message path if authorized, and kicks the FSA.
 *
 * \param[in]     session  Proxy session the message came from
 * \param[in,out] msg      Proxied request to process
 */
static void
crmd_proxy_dispatch(const char *session, xmlNode *msg)
{
    pcmk__trace("Processing proxied IPC message from session %s", session);
    pcmk__log_xml_trace(msg, "controller[inbound]");
    // Record the originating session so replies can be routed back
    pcmk__xe_set(msg, PCMK__XA_CRM_SYS_FROM, session);
    if (controld_authorize_ipc_message(msg, NULL, session)) {
        route_message(C_IPC_MESSAGE, msg);
    }
    controld_trigger_fsa();
}
427
428 static void
429 remote_config_check(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
430 {
431 if (rc != pcmk_ok) {
432 pcmk__err("Query resulted in an error: %s", pcmk_strerror(rc));
433
434 if (rc == -EACCES || rc == -pcmk_err_schema_validation) {
435 pcmk__err("The cluster is mis-configured - shutting down and "
436 "staying down");
437 }
438
439 } else {
440 lrmd_t * lrmd = (lrmd_t *)user_data;
441 crm_time_t *now = crm_time_new(NULL);
442 GHashTable *config_hash = pcmk__strkey_table(free, free);
443 pcmk_rule_input_t rule_input = {
444 .now = now,
445 };
446
447 pcmk__debug("Call %d : Parsing CIB options", call_id);
448 pcmk_unpack_nvpair_blocks(output, PCMK_XE_CLUSTER_PROPERTY_SET,
449 PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS, &rule_input,
450 config_hash, NULL);
451
452 /* Now send it to the remote peer */
453 lrmd__validate_remote_settings(lrmd, config_hash);
454
455 g_hash_table_destroy(config_hash);
456 crm_time_free(now);
457 }
458 }
459
/*!
 * \internal
 * \brief Proxy callback for messages arriving from a Pacemaker Remote peer
 *
 * Dispatches on the proxied IPC operation: session creation (optionally
 * triggering a CIB query whose result is validated against the peer),
 * shutdown requests (acked or nacked depending on maintenance mode), local
 * controller requests (handled directly rather than over IPC), and everything
 * else (passed to the generic proxy handler).
 *
 * \param[in,out] lrmd      Executor API connection the message arrived on
 * \param[in,out] userdata  Executor state (lrm_state_t) for the remote node
 * \param[in,out] msg       Proxied message
 */
static void
crmd_remote_proxy_cb(lrmd_t *lrmd, void *userdata, xmlNode *msg)
{
    lrm_state_t *lrm_state = userdata;
    const char *session = pcmk__xe_get(msg, PCMK__XA_LRMD_IPC_SESSION);
    // NULL if the session is not (yet) in proxy_table
    remote_proxy_t *proxy = g_hash_table_lookup(proxy_table, session);

    const char *op = pcmk__xe_get(msg, PCMK__XA_LRMD_IPC_OP);
    if (pcmk__str_eq(op, LRMD_IPC_OP_NEW, pcmk__str_casei)) {
        // Peer opened a new proxied IPC session
        const char *channel = pcmk__xe_get(msg, PCMK__XA_LRMD_IPC_SERVER);

        proxy = crmd_remote_proxy_new(lrmd, lrm_state->node_name, session, channel);
        if (!remote_ra_controlling_guest(lrm_state)) {
            if (proxy != NULL) {
                cib_t *cib_conn = controld_globals.cib_conn;

                /* Look up PCMK_OPT_FENCING_WATCHDOG_TIMEOUT and send to the
                 * remote peer for validation
                 */
                int rc = cib_conn->cmds->query(cib_conn, PCMK_XE_CRM_CONFIG,
                                               NULL, cib_none);
                cib_conn->cmds->register_callback_full(cib_conn, rc, 10, FALSE,
                                                       lrmd,
                                                       "remote_config_check",
                                                       remote_config_check,
                                                       NULL);
            }
        } else {
            pcmk__debug("Skipping remote_config_check for guest-nodes");
        }

    } else if (pcmk__str_eq(op, LRMD_IPC_OP_SHUTDOWN_REQ, pcmk__str_casei)) {
        // Peer asked us to shut down its remote connection in an orderly way
        char *now_s = NULL;

        pcmk__notice("%s requested shutdown of its remote connection",
                     lrm_state->node_name);

        if (!remote_ra_is_in_maintenance(lrm_state)) {
            // Record a shutdown attribute so the scheduler stops the resource
            now_s = pcmk__ttoa(time(NULL));
            update_attrd(lrm_state->node_name, PCMK__NODE_ATTR_SHUTDOWN, now_s,
                         true);
            free(now_s);

            remote_proxy_ack_shutdown(lrmd);

            pcmk__warn("Reconnection attempts to %s may result in failures "
                       "that must be cleared",
                       lrm_state->node_name);
        } else {
            // Unmanaged connection: refuse the ordered shutdown
            remote_proxy_nack_shutdown(lrmd);

            pcmk__notice("Remote resource for %s is not managed so no ordered "
                         "shutdown happening",
                         lrm_state->node_name);
        }
        return;

    } else if (pcmk__str_eq(op, LRMD_IPC_OP_REQUEST, pcmk__str_casei) && proxy && proxy->is_local) {
        /* This is for the controller, which we are, so don't try
         * to send to ourselves over IPC -- do it directly.
         */
        uint32_t flags = 0U;
        int rc = pcmk_rc_ok;
        xmlNode *wrapper = pcmk__xe_first_child(msg, PCMK__XE_LRMD_IPC_MSG,
                                                NULL, NULL);
        xmlNode *request = pcmk__xe_first_child(wrapper, NULL, NULL, NULL);

        CRM_CHECK(request != NULL, return);
        CRM_CHECK(lrm_state->node_name, return);
        // Apply remote-node ACLs to whatever the request tries to do
        pcmk__xe_set(request, PCMK_XE_ACL_ROLE, "pacemaker-remote");
        pcmk__update_acl_user(request, PCMK__XA_LRMD_IPC_USER,
                              lrm_state->node_name);

        /* Pacemaker Remote nodes don't know their own names (as known to the
         * cluster). When getting a node info request with no name or ID, add
         * the name, so we don't return info for ourselves instead of the
         * Pacemaker Remote node.
         */
        if (pcmk__str_eq(pcmk__xe_get(request, PCMK__XA_CRM_TASK),
                         CRM_OP_NODE_INFO, pcmk__str_none)) {
            int node_id = 0;

            pcmk__xe_get_int(request, PCMK_XA_ID, &node_id);
            if ((node_id <= 0)
                && (pcmk__xe_get(request, PCMK_XA_UNAME) == NULL)) {
                pcmk__xe_set(request, PCMK_XA_UNAME, lrm_state->node_name);
            }
        }

        crmd_proxy_dispatch(session, request);

        rc = pcmk__xe_get_flags(msg, PCMK__XA_LRMD_IPC_MSG_FLAGS, &flags, 0U);
        if (rc != pcmk_rc_ok) {
            pcmk__warn("Couldn't parse controller flags from remote request: "
                       "%s",
                       pcmk_rc_str(rc));
        }
        if (pcmk__is_set(flags, crm_ipc_client_response)) {
            // Requester expects a response, so send a minimal ack
            int msg_id = 0;
            xmlNode *op_reply = pcmk__xe_create(NULL, PCMK__XE_ACK);

            pcmk__xe_set(op_reply, PCMK_XA_FUNCTION, __func__);
            pcmk__xe_set_int(op_reply, PCMK__XA_LINE, __LINE__);

            pcmk__xe_get_int(msg, PCMK__XA_LRMD_IPC_MSG_ID, &msg_id);
            remote_proxy_relay_response(proxy, op_reply, msg_id);

            pcmk__xml_free(op_reply);
        }

    } else {
        // Anything else is handled by the generic proxy implementation
        remote_proxy_cb(lrmd, lrm_state->node_name, msg);
    }
}
574
575
576 // \return Standard Pacemaker return code
577 int
578 controld_connect_remote_executor(lrm_state_t *lrm_state, const char *server,
579 int port, int timeout_ms)
580 {
581 int rc = pcmk_rc_ok;
582
583 if (lrm_state->conn == NULL) {
584 lrmd_t *api = NULL;
585
586 rc = lrmd__new(&api, lrm_state->node_name, server, port);
587 if (rc != pcmk_rc_ok) {
588 pcmk__warn("Pacemaker Remote connection to %s:%s failed: %s "
589 QB_XS " rc=%d",
590 server, port, pcmk_rc_str(rc), rc);
591
592 return rc;
593 }
594 lrm_state->conn = api;
595 api->cmds->set_callback(api, remote_lrm_op_callback);
596 lrmd_internal_set_proxy_callback(api, lrm_state, crmd_remote_proxy_cb);
597 }
598
599 pcmk__trace("Initiating remote connection to %s:%d with timeout %dms",
600 server, port, timeout_ms);
601 rc = ((lrmd_t *) lrm_state->conn)->cmds->connect_async(lrm_state->conn,
602 lrm_state->node_name,
603 timeout_ms);
604 if (rc == pcmk_ok) {
605 lrm_state->num_lrm_register_fails = 0;
606 } else {
607 lrm_state->num_lrm_register_fails++; // Ignored for remote connections
608 }
609 return pcmk_legacy2rc(rc);
610 }
611
612 int
613 lrm_state_get_metadata(lrm_state_t * lrm_state,
614 const char *class,
615 const char *provider,
616 const char *agent, char **output, enum lrmd_call_options options)
617 {
618 lrmd_key_value_t *params = NULL;
619
620 if (!lrm_state->conn) {
621 return -ENOTCONN;
622 }
623
624 /* Add the node name to the environment, as is done with normal resource
625 * action calls. Meta-data calls shouldn't need it, but some agents are
626 * written with an ocf_local_nodename call at the beginning regardless of
627 * action. Without the environment variable, the agent would try to contact
628 * the controller to get the node name -- but the controller would be
629 * blocking on the synchronous meta-data call.
630 *
631 * At this point, we have to assume that agents are unlikely to make other
632 * calls that require the controller, such as crm_node --quorum or
633 * --cluster-id.
634 *
635 * @TODO Make meta-data calls asynchronous. (This will be part of a larger
636 * project to make meta-data calls via the executor rather than directly.)
637 */
638 params = lrmd_key_value_add(params, CRM_META "_" PCMK__META_ON_NODE,
639 lrm_state->node_name);
640
641 return ((lrmd_t *) lrm_state->conn)->cmds->get_metadata_params(lrm_state->conn,
642 class, provider, agent, output, options, params);
643 }
644
645 int
646 lrm_state_cancel(lrm_state_t *lrm_state, const char *rsc_id, const char *action,
647 guint interval_ms)
648 {
649 if (!lrm_state->conn) {
650 return -ENOTCONN;
651 }
652
653 /* Figure out a way to make this async?
654 * NOTICE: Currently it's synced and directly acknowledged in
655 * controld_invoke_execd().
656 */
657 if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) {
658 return remote_ra_cancel(lrm_state, rsc_id, action, interval_ms);
659 }
660 return ((lrmd_t *) lrm_state->conn)->cmds->cancel(lrm_state->conn, rsc_id,
661 action, interval_ms);
662 }
663
664 lrmd_rsc_info_t *
665 lrm_state_get_rsc_info(lrm_state_t * lrm_state, const char *rsc_id, enum lrmd_call_options options)
666 {
667 lrmd_rsc_info_t *rsc = NULL;
668
669 if (!lrm_state->conn) {
670 return NULL;
671 }
672 if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) {
673 return remote_ra_get_rsc_info(lrm_state, rsc_id);
674 }
675
676 rsc = g_hash_table_lookup(lrm_state->rsc_info_cache, rsc_id);
677 if (rsc == NULL) {
678 /* only contact the lrmd if we don't already have a cached rsc info */
679 rsc = ((lrmd_t *) lrm_state->conn)->cmds->get_rsc_info(lrm_state->conn, rsc_id, options);
680 if (rsc == NULL) {
681 return NULL;
682 }
683 /* cache the result */
684 g_hash_table_insert(lrm_state->rsc_info_cache, rsc->id, rsc);
685 }
686
687 return lrmd_copy_rsc_info(rsc);
688
689 }
690
691 /*!
692 * \internal
693 * \brief Initiate a resource agent action
694 *
695 * \param[in,out] lrm_state Executor state object
696 * \param[in] rsc_id ID of resource for action
697 * \param[in] action Action to execute
698 * \param[in] userdata String to copy and pass to execution callback
699 * \param[in] interval_ms Action interval (in milliseconds)
700 * \param[in] timeout_ms Action timeout (in milliseconds)
701 * \param[in] start_delay_ms Delay (in ms) before initiating action
702 * \param[in] parameters Hash table of resource parameters
703 * \param[out] call_id Where to store call ID on success
704 *
705 * \return Standard Pacemaker return code
706 */
707 int
708 controld_execute_resource_agent(lrm_state_t *lrm_state, const char *rsc_id,
709 const char *action, const char *userdata,
710 guint interval_ms, int timeout_ms,
711 int start_delay_ms, GHashTable *parameters,
712 int *call_id)
713 {
714 int rc = pcmk_rc_ok;
715 lrmd_key_value_t *params = NULL;
716
717 if (lrm_state->conn == NULL) {
718 return ENOTCONN;
719 }
720
721 // Convert parameters from hash table to list
722 if (parameters != NULL) {
723 const char *key = NULL;
724 const char *value = NULL;
725 GHashTableIter iter;
726
727 g_hash_table_iter_init(&iter, parameters);
728 while (g_hash_table_iter_next(&iter, (gpointer *) &key,
729 (gpointer *) &value)) {
730 params = lrmd_key_value_add(params, key, value);
731 }
732 }
733
734 if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) {
735 rc = controld_execute_remote_agent(lrm_state, rsc_id, action,
736 userdata, interval_ms, timeout_ms,
737 start_delay_ms, params, call_id);
738
739 } else {
740 rc = ((lrmd_t *) lrm_state->conn)->cmds->exec(lrm_state->conn, rsc_id,
741 action, userdata,
742 interval_ms, timeout_ms,
743 start_delay_ms,
744 lrmd_opt_notify_changes_only,
745 params);
746 if (rc < 0) {
747 rc = pcmk_legacy2rc(rc);
748 } else {
749 *call_id = rc;
750 rc = pcmk_rc_ok;
751 }
752 }
753 return rc;
754 }
755
756 int
757 lrm_state_register_rsc(lrm_state_t * lrm_state,
758 const char *rsc_id,
759 const char *class,
760 const char *provider, const char *agent, enum lrmd_call_options options)
761 {
762 lrmd_t *conn = (lrmd_t *) lrm_state->conn;
763
764 if (conn == NULL) {
765 return -ENOTCONN;
766 }
767
768 if (is_remote_lrmd_ra(agent, provider, NULL)) {
769 return controld_get_executor_state(rsc_id, true)? pcmk_ok : -EINVAL;
770 }
771
772 /* @TODO Implement an asynchronous version of this (currently a blocking
773 * call to the lrmd).
774 */
775 return conn->cmds->register_rsc(lrm_state->conn, rsc_id, class, provider,
776 agent, options);
777 }
778
779 int
780 lrm_state_unregister_rsc(lrm_state_t * lrm_state,
781 const char *rsc_id, enum lrmd_call_options options)
782 {
783 if (!lrm_state->conn) {
784 return -ENOTCONN;
785 }
786
787 if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) {
788 g_hash_table_remove(lrm_state_table, rsc_id);
789 return pcmk_ok;
790 }
791
792 g_hash_table_remove(lrm_state->rsc_info_cache, rsc_id);
793
794 /* @TODO Optimize this ... this function is a blocking round trip from
795 * client to daemon. The controld_execd_state.c code path that uses this
796 * function should always treat it as an async operation. The executor API
797 * should make an async version available.
798 */
799 return ((lrmd_t *) lrm_state->conn)->cmds->unregister_rsc(lrm_state->conn, rsc_id, options);
800 }
801