/*
 * Copyright 2012-2026 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU Lesser General Public License
 * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>

#include <errno.h>                  // EREMOTEIO, ENOMEM
#include <stdint.h>                 // int32_t, uint32_t
#include <stdlib.h>                 // NULL, free, size_t
#include <sys/types.h>              // gid_t, uid_t

#include <glib.h>                   // g_byte_array_free, g_list_*
#include <libxml/tree.h>            // xmlNode
#include <qb/qbipcs.h>              // qb_ipcs_connection_t
#include <qb/qblog.h>               // QB_XS

#include <crm/common/internal.h>
#include <crm/common/ipc.h>         // crm_ipc_flags
#include <crm/common/logging.h>     // CRM_CHECK, CRM_LOG_ASSERT
#include <crm/common/results.h>     // pcmk_rc_*, pcmk_rc_str
#include <crm/crm.h>                // CRM_SYSTEM_CRMD
#include <crm/lrmd.h>               // LRMD_IPC_OP_DESTROY

#include "pacemaker-execd.h"        // lrmd_server_send_notify

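/* IPC servers created on this Pacemaker Remote node to stand in for the
 * cluster daemons' IPC endpoints, so that local clients can connect as if
 * those daemons were running locally
 */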
static qb_ipcs_service_t *cib_ro = NULL;
static qb_ipcs_service_t *cib_rw = NULL;
static qb_ipcs_service_t *cib_shm = NULL;

static qb_ipcs_service_t *attrd_ipcs = NULL;
static qb_ipcs_service_t *crmd_ipcs = NULL;
static qb_ipcs_service_t *fencer_ipcs = NULL;
static qb_ipcs_service_t *pacemakerd_ipcs = NULL;

// An IPC provider is a cluster node controller connecting as a client
static GList *ipc_providers = NULL;

/* IPC clients are local commands (such as cibadmin or crm_resource) whose
 * connections are proxied to the cluster
 *
 * @TODO This should be unnecessary (pcmk__foreach_ipc_client() should be
 * sufficient)
 */
static GHashTable *ipc_clients = NULL;

/*!
 * \internal
 * \brief Get an IPC proxy provider
 *
 * \return Pointer to a provider if one exists, NULL otherwise
 *
 * \note Grab the first provider, which is the most recent connection. That
 *       way, if we haven't yet timed out an old, failed connection, we don't
 *       try to use it.
 */
pcmk__client_t *
ipc_proxy_get_provider(void)
{
    return ipc_providers? (pcmk__client_t *) (ipc_providers->data) : NULL;
}

/*!
 * \internal
 * \brief Accept a client connection on a proxy IPC server
 *
 * \param[in,out] c            New connection
 * \param[in]     uid          Client user id
 * \param[in]     gid          Client group id
 * \param[in]     ipc_channel  Name of IPC server to proxy
 *
 * \return 0 on success, -errno on error
 */
static int32_t
ipc_proxy_accept(qb_ipcs_connection_t *c, uid_t uid, gid_t gid,
                 const char *ipc_channel)
{
    pcmk__client_t *client = NULL;
    pcmk__client_t *ipc_proxy = ipc_proxy_get_provider();
    xmlNode *msg = NULL;

    if (ipc_proxy == NULL) {
        pcmk__warn("Cannot proxy IPC connection from uid %d gid %d to %s "
                   "because not connected to cluster",
                   uid, gid, ipc_channel);
        return -EREMOTEIO;
    }

    /* This new client is a local IPC client on a Pacemaker Remote controlled
     * node, needing to access cluster node IPC services.
     */
    client = pcmk__new_client(c, uid, gid);
    if (client == NULL) {
        return -ENOMEM;
    }

    /* This ipc client is bound to a single ipc provider. If the
     * provider goes away, this client is disconnected.
     */
    client->userdata = pcmk__str_copy(ipc_proxy->id);
    client->name = pcmk__assert_asprintf("proxy-%s-%d-%.8s", ipc_channel,
                                         client->pid, client->id);

    /* Allow remote executor to distinguish between proxied local clients and
     * actual executor API clients
     */
    pcmk__set_client_flags(client, pcmk__client_to_proxy);

    g_hash_table_insert(ipc_clients, client->id, client);

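    /* Notify the provider (the controller on a cluster node) of the new
     * session, so it can open a matching connection to the requested IPC
     * server on the cluster side
     */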
    msg = pcmk__xe_create(NULL, PCMK__XE_LRMD_IPC_PROXY);
    pcmk__xe_set(msg, PCMK__XA_LRMD_IPC_OP, LRMD_IPC_OP_NEW);
    pcmk__xe_set(msg, PCMK__XA_LRMD_IPC_SERVER, ipc_channel);
    pcmk__xe_set(msg, PCMK__XA_LRMD_IPC_SESSION, client->id);
    lrmd_server_send_notify(ipc_proxy, msg);
    pcmk__xml_free(msg);
    pcmk__debug("Accepted IPC proxy connection (session ID %s) from uid %d "
                "gid %d on channel %s",
                client->id, uid, gid, ipc_channel);
    return 0;
}

static int32_t
crmd_proxy_accept(qb_ipcs_connection_t *c, uid_t uid, gid_t gid)
{
    return ipc_proxy_accept(c, uid, gid, CRM_SYSTEM_CRMD);
}

static int32_t
attrd_proxy_accept(qb_ipcs_connection_t *c, uid_t uid, gid_t gid)
{
    return ipc_proxy_accept(c, uid, gid, PCMK__VALUE_ATTRD);
}

static int32_t
fencer_proxy_accept(qb_ipcs_connection_t *c, uid_t uid, gid_t gid)
{
    return ipc_proxy_accept(c, uid, gid, "stonith-ng");
}

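// pacemakerd IPC is not proxied to the cluster, so always refuse connections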
static int32_t
pacemakerd_proxy_accept(qb_ipcs_connection_t *c, uid_t uid, gid_t gid)
{
    return -EREMOTEIO;
}

static int32_t
cib_proxy_accept_rw(qb_ipcs_connection_t *c, uid_t uid, gid_t gid)
{
    return ipc_proxy_accept(c, uid, gid, PCMK__SERVER_BASED_RW);
}

static int32_t
cib_proxy_accept_ro(qb_ipcs_connection_t *c, uid_t uid, gid_t gid)
{
    return ipc_proxy_accept(c, uid, gid, PCMK__SERVER_BASED_RO);
}

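/*!
 * \internal
 * \brief Forward a message from an IPC provider to a proxied local client
 *
 * \param[in,out] ipc_proxy  IPC provider (cluster node controller) connection
 * \param[in]     xml        Message from the provider
 *
 * \return Standard Pacemaker return code
 */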
int
ipc_proxy_forward_client(pcmk__client_t *ipc_proxy, xmlNode *xml)
{
    const char *session = pcmk__xe_get(xml, PCMK__XA_LRMD_IPC_SESSION);
    const char *msg_type = pcmk__xe_get(xml, PCMK__XA_LRMD_IPC_OP);

    xmlNode *wrapper = pcmk__xe_first_child(xml, PCMK__XE_LRMD_IPC_MSG, NULL,
                                            NULL);
    xmlNode *msg = pcmk__xe_first_child(wrapper, NULL, NULL, NULL);

    pcmk__client_t *ipc_client = NULL;
    int rc = pcmk_rc_ok;

    if (pcmk__str_eq(msg_type, LRMD_IPC_OP_SHUTDOWN_ACK, pcmk__str_casei)) {
        handle_shutdown_ack();
        return rc;
    }

    if (pcmk__str_eq(msg_type, LRMD_IPC_OP_SHUTDOWN_NACK, pcmk__str_casei)) {
        handle_shutdown_nack();
        return rc;
    }

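    /* Find the local client for this session; if it is already gone, tell the
     * provider to tear down the session on its side as well
     */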
    ipc_client = pcmk__find_client_by_id(session);
    if (ipc_client == NULL) {
        xmlNode *msg = pcmk__xe_create(NULL, PCMK__XE_LRMD_IPC_PROXY);

        pcmk__xe_set(msg, PCMK__XA_LRMD_IPC_OP, LRMD_IPC_OP_DESTROY);
        pcmk__xe_set(msg, PCMK__XA_LRMD_IPC_SESSION, session);
        lrmd_server_send_notify(ipc_proxy, msg);
        pcmk__xml_free(msg);
        return rc;
    }

    /* This is an event or response from the ipc provider
     * going to the local ipc client.
     *
     * Looking at the chain of events:
     *
     * -----remote node----------------|---- cluster node ------
     * ipc_client <--1--> this code
     *     <--2--> pacemaker-controld:remote_proxy_cb/remote_proxy_relay_event()
     *     <--3--> ipc server
     *
     * This function is receiving a msg from connection 2
     * and forwarding it to connection 1.
     */

    if (pcmk__str_eq(msg_type, LRMD_IPC_OP_EVENT, pcmk__str_casei)) {
        pcmk__trace("Sending event to %s", ipc_client->id);
        rc = pcmk__ipc_send_xml(ipc_client, 0, msg, crm_ipc_server_event);

    } else if (pcmk__str_eq(msg_type, LRMD_IPC_OP_RESPONSE, pcmk__str_casei)) {
        int msg_id = 0;

        pcmk__xe_get_int(xml, PCMK__XA_LRMD_IPC_MSG_ID, &msg_id);

        pcmk__trace("Sending response to %d - %s", ipc_client->request_id,
                    ipc_client->id);
        rc = pcmk__ipc_send_xml(ipc_client, msg_id, msg, crm_ipc_flags_none);

        CRM_LOG_ASSERT(msg_id == ipc_client->request_id);
        ipc_client->request_id = 0;

    } else if (pcmk__str_eq(msg_type, LRMD_IPC_OP_DESTROY, pcmk__str_casei)) {
        qb_ipcs_disconnect(ipc_client->ipcs);

    } else {
        pcmk__err("Unknown IPC proxy message type %s", msg_type);
    }

    if (rc != pcmk_rc_ok) {
        pcmk__warn("Could not proxy IPC to client %s: %s " QB_XS " rc=%d",
                   ipc_client->id, pcmk_rc_str(rc), rc);
    }

    return rc;
}

/*!
 * \internal
 * \brief Handle a message from an IPC connection
 *
 * \param[in,out] c     Established IPC connection
 * \param[in]     data  Message data read from the connection (this can be a
 *                      complete IPC message or just part of one if it's very
 *                      large)
 * \param[in]     size  Unused
 *
 * \return 0 in all cases
 */
static int32_t
ipc_proxy_dispatch(qb_ipcs_connection_t *c, void *data, size_t size)
{
    int rc = pcmk_rc_ok;
    uint32_t id = 0;
    uint32_t flags = 0;
    pcmk__client_t *client = pcmk__find_client(c);
    pcmk__client_t *ipc_proxy = NULL;
    xmlNode *wrapper = NULL;
    xmlNode *request = NULL;
    xmlNode *msg = NULL;

    // Sanity-check, and parse XML from IPC data
    CRM_CHECK(client != NULL, return 0);
    if (data == NULL) {
        pcmk__debug("No IPC data from PID %d", pcmk__client_pid(c));
        return 0;
    }

    ipc_proxy = pcmk__find_client_by_id(client->userdata);
    if (ipc_proxy == NULL) {
        qb_ipcs_disconnect(client->ipcs);
        return 0;
    }

    /* This is a request from the local ipc client going
     * to the ipc provider.
     *
     * Looking at the chain of events:
     *
     * -----remote node----------------|---- cluster node ------
     * ipc_client <--1--> this code
     *     <--2--> pacemaker-controld:remote_proxy_dispatch_internal()
     *     <--3--> ipc server
     *
     * This function is receiving a request from connection
     * 1 and forwarding it to connection 2.
     */
    rc = pcmk__ipc_msg_append(&client->buffer, data);

    if (rc == pcmk_rc_ipc_more) {
        // We haven't read the complete message yet, so just return
        return 0;

    } else if (rc == pcmk_rc_ok) {
        /* We've read the complete message and there's already a header on
         * the front. Pass it off for processing.
         */
        request = pcmk__client_data2xml(client, &id, &flags);
        g_byte_array_free(client->buffer, TRUE);
        client->buffer = NULL;

    } else {
        /* Some sort of error occurred reassembling the message. All we can
         * do is clean up, log an error and return.
         */
        pcmk__err("Error when reading IPC message: %s", pcmk_rc_str(rc));

        if (client->buffer != NULL) {
            g_byte_array_free(client->buffer, TRUE);
            client->buffer = NULL;
        }

        return 0;
    }

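    // request can be NULL if the data couldn't be parsed as an XML message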
    if (request == NULL) {
        return 0;
    }

    /* This ensures that synchronous requests/responses happen over the event
     * channel in the controller, allowing the controller to process the
     * messages asynchronously.
     */
    pcmk__set_ipc_flags(flags, pcmk__client_name(client), crm_ipc_proxied);
    client->request_id = id;

    msg = pcmk__xe_create(NULL, PCMK__XE_LRMD_IPC_PROXY);
    pcmk__xe_set(msg, PCMK__XA_LRMD_IPC_OP, LRMD_IPC_OP_REQUEST);
    pcmk__xe_set(msg, PCMK__XA_LRMD_IPC_SESSION, client->id);
    pcmk__xe_set(msg, PCMK__XA_LRMD_IPC_CLIENT, pcmk__client_name(client));
    pcmk__xe_set(msg, PCMK__XA_LRMD_IPC_USER, client->user);
    pcmk__xe_set_int(msg, PCMK__XA_LRMD_IPC_MSG_ID, id);

    // @TODO Use different setter for uint32_t
    pcmk__xe_set_int(msg, PCMK__XA_LRMD_IPC_MSG_FLAGS, flags);

    wrapper = pcmk__xe_create(msg, PCMK__XE_LRMD_IPC_MSG);

    pcmk__xml_copy(wrapper, request);

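    /* The assembled message looks roughly like the following (element and
     * attribute names are the string values of the PCMK__XE_/PCMK__XA_
     * constants used above; the values shown are only illustrative):
     *
     * <lrmd_ipc_proxy lrmd_ipc_op="request" lrmd_ipc_session="1234-..."
     *                 lrmd_ipc_client="cibadmin" lrmd_ipc_user="root"
     *                 lrmd_ipc_msg_id="42" lrmd_ipc_msg_flags="...">
     *   <lrmd_ipc_msg>
     *     <!-- copy of the local client's original request -->
     *   </lrmd_ipc_msg>
     * </lrmd_ipc_proxy>
     */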
    lrmd_server_send_notify(ipc_proxy, msg);

    pcmk__xml_free(request);
    pcmk__xml_free(msg);
    return 0;
}

/*!
 * \internal
 * \brief Notify a proxy provider that we wish to shut down
 *
 * \param[in,out] ipc_proxy  IPC client connection to proxy provider
 *
 * \return 0 on success, -1 on error
 */
int
ipc_proxy_shutdown_req(pcmk__client_t *ipc_proxy)
{
    xmlNode *msg = pcmk__xe_create(NULL, PCMK__XE_LRMD_IPC_PROXY);
    int rc;

    pcmk__xe_set(msg, PCMK__XA_LRMD_IPC_OP, LRMD_IPC_OP_SHUTDOWN_REQ);

    /* We don't really have a session, but the controller needs this attribute
     * to recognize this as proxy communication.
     */
    pcmk__xe_set(msg, PCMK__XA_LRMD_IPC_SESSION, "0");

    rc = (lrmd_server_send_notify(ipc_proxy, msg) != pcmk_rc_ok)? -1 : 0;
    pcmk__xml_free(msg);
    return rc;
}

/*!
 * \internal
 * \brief Clean up a closed client IPC connection
 *
 * \param[in] c  Closed connection
 *
 * \return 0 (i.e. do not re-run this callback)
 */
static int32_t
ipc_proxy_closed(qb_ipcs_connection_t *c)
{
    pcmk__client_t *client = pcmk__find_client(c);

    if (client == NULL) {
        pcmk__trace("Ignoring request to clean up unknown connection %p", c);
    } else {
        pcmk__client_t *ipc_proxy = pcmk__find_client_by_id(client->userdata);

        pcmk__trace("Cleaning up closed client connection %p", c);

        if (ipc_proxy != NULL) {
            xmlNode *msg = pcmk__xe_create(NULL, PCMK__XE_LRMD_IPC_PROXY);

            pcmk__xe_set(msg, PCMK__XA_LRMD_IPC_OP, LRMD_IPC_OP_DESTROY);
            pcmk__xe_set(msg, PCMK__XA_LRMD_IPC_SESSION, client->id);
            lrmd_server_send_notify(ipc_proxy, msg);
            pcmk__xml_free(msg);
        }

        g_hash_table_remove(ipc_clients, client->id);
        g_clear_pointer(&client->userdata, free);
        pcmk__free_client(client);
    }

    return 0;
}

/*!
 * \internal
 * \brief Destroy a client IPC connection
 *
 * \param[in] c  Connection to destroy
 *
 * \note We handle a destroyed connection the same as a closed one,
 *       but we need a separate handler because the return type is different.
 */
static void
ipc_proxy_destroy(qb_ipcs_connection_t *c)
{
    pcmk__trace("Destroying client connection %p", c);
    ipc_proxy_closed(c);
}

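/* libqb handler tables for each proxied IPC server. All servers except
 * pacemakerd share the same dispatch and cleanup callbacks; pacemakerd
 * connections are refused outright, so it needs none.
 */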
static struct qb_ipcs_service_handlers crmd_proxy_callbacks = {
    .connection_accept = crmd_proxy_accept,
    .connection_created = NULL,
    .msg_process = ipc_proxy_dispatch,
    .connection_closed = ipc_proxy_closed,
    .connection_destroyed = ipc_proxy_destroy
};

static struct qb_ipcs_service_handlers attrd_proxy_callbacks = {
    .connection_accept = attrd_proxy_accept,
    .connection_created = NULL,
    .msg_process = ipc_proxy_dispatch,
    .connection_closed = ipc_proxy_closed,
    .connection_destroyed = ipc_proxy_destroy
};

static struct qb_ipcs_service_handlers fencer_proxy_callbacks = {
    .connection_accept = fencer_proxy_accept,
    .connection_created = NULL,
    .msg_process = ipc_proxy_dispatch,
    .connection_closed = ipc_proxy_closed,
    .connection_destroyed = ipc_proxy_destroy
};

static struct qb_ipcs_service_handlers pacemakerd_proxy_callbacks = {
    .connection_accept = pacemakerd_proxy_accept,
    .connection_created = NULL,
    .msg_process = NULL,
    .connection_closed = NULL,
    .connection_destroyed = NULL
};

static struct qb_ipcs_service_handlers cib_proxy_callbacks_ro = {
    .connection_accept = cib_proxy_accept_ro,
    .connection_created = NULL,
    .msg_process = ipc_proxy_dispatch,
    .connection_closed = ipc_proxy_closed,
    .connection_destroyed = ipc_proxy_destroy
};

static struct qb_ipcs_service_handlers cib_proxy_callbacks_rw = {
    .connection_accept = cib_proxy_accept_rw,
    .connection_created = NULL,
    .msg_process = ipc_proxy_dispatch,
    .connection_closed = ipc_proxy_closed,
    .connection_destroyed = ipc_proxy_destroy
};

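/*!
 * \internal
 * \brief Add an IPC provider (a newly connected cluster node controller)
 *
 * \param[in,out] ipc_proxy  Client connection from the provider
 */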
void
ipc_proxy_add_provider(pcmk__client_t *ipc_proxy)
{
    // Prepending ensures the most recent connection is always first
    ipc_providers = g_list_prepend(ipc_providers, ipc_proxy);
}

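/*!
 * \internal
 * \brief Stop tracking an IPC provider and disconnect its proxied clients
 *
 * \param[in,out] ipc_proxy  Client connection from the provider
 */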
void
ipc_proxy_remove_provider(pcmk__client_t *ipc_proxy)
{
    GHashTableIter iter;
    pcmk__client_t *ipc_client = NULL;
    char *key = NULL;
    GList *remove_these = NULL;
    GList *gIter = NULL;

    ipc_providers = g_list_remove(ipc_providers, ipc_proxy);

    g_hash_table_iter_init(&iter, ipc_clients);
    while (g_hash_table_iter_next(&iter, (gpointer *) &key,
                                  (gpointer *) &ipc_client)) {
        const char *proxy_id = ipc_client->userdata;

        if (pcmk__str_eq(proxy_id, ipc_proxy->id, pcmk__str_casei)) {
            pcmk__info("IPC proxy connection for client %s pid %d destroyed "
                       "because cluster node disconnected",
                       ipc_client->id, ipc_client->pid);
            /* We can't remove clients during the iteration, so copy them to a
             * list we can traverse afterward
             */
            remove_these = g_list_append(remove_these, ipc_client);
        }
    }

    for (gIter = remove_these; gIter != NULL; gIter = gIter->next) {
        ipc_client = gIter->data;

        // Disconnection callback will free the client here
        qb_ipcs_disconnect(ipc_client->ipcs);
    }

    // This frees only the list, not its elements
    g_list_free(remove_these);
}

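/*!
 * \internal
 * \brief Start serving the proxied IPC endpoints
 */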
void
ipc_proxy_init(void)
{
    ipc_clients = pcmk__strkey_table(NULL, NULL);

    pcmk__serve_based_ipc(&cib_ro, &cib_rw, &cib_shm, &cib_proxy_callbacks_ro,
                          &cib_proxy_callbacks_rw);
    pcmk__serve_attrd_ipc(&attrd_ipcs, &attrd_proxy_callbacks);
    pcmk__serve_fenced_ipc(&fencer_ipcs, &fencer_proxy_callbacks);
    pcmk__serve_pacemakerd_ipc(&pacemakerd_ipcs, &pacemakerd_proxy_callbacks);
    crmd_ipcs = pcmk__serve_controld_ipc(&crmd_proxy_callbacks);
    if (crmd_ipcs == NULL) {
        pcmk__err("Failed to create controller IPC server: exiting and "
                  "inhibiting respawn");
        pcmk__warn("Verify pacemaker and pacemaker_remote are not both "
                   "enabled");
        crm_exit(CRM_EX_FATAL);
    }
}

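/*!
 * \internal
 * \brief Stop serving the proxied IPC endpoints and free memory used by them
 */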
void
ipc_proxy_cleanup(void)
{
    g_clear_pointer(&ipc_providers, g_list_free);
    g_clear_pointer(&ipc_clients, g_hash_table_destroy);

    pcmk__stop_based_ipc(cib_ro, cib_rw, cib_shm);

    g_clear_pointer(&attrd_ipcs, qb_ipcs_destroy);
    g_clear_pointer(&fencer_ipcs, qb_ipcs_destroy);
    g_clear_pointer(&pacemakerd_ipcs, qb_ipcs_destroy);
    g_clear_pointer(&crmd_ipcs, qb_ipcs_destroy);

    cib_ro = NULL;
    cib_rw = NULL;
    cib_shm = NULL;
}