1 /*
2 * Copyright 2004-2026 the Pacemaker project contributors
3 *
4 * The version control history for this file may have further details.
5 *
6 * This source code is licensed under the GNU Lesser General Public License
7 * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
8 */
9
10 #include <crm_internal.h>
11
12 #if defined(HAVE_UCRED) || defined(HAVE_SOCKPEERCRED)
13 #include <sys/socket.h>
14 #elif defined(HAVE_GETPEERUCRED)
15 #include <ucred.h>
16 #endif
17
18 #include <stdbool.h>
19 #include <stdio.h>
20 #include <sys/types.h>
21 #include <errno.h>
22 #include <bzlib.h>
23
24 #include <crm/crm.h> /* indirectly: pcmk_err_generic */
25 #include <crm/common/xml.h>
26 #include <crm/common/ipc.h>
27 #include "crmcommon_private.h"
28
29 static int is_ipc_provider_expected(qb_ipcc_connection_t *qb_ipc, int sock,
30 uid_t refuid, gid_t refgid, pid_t *gotpid,
31 uid_t *gotuid, gid_t *gotgid);
32
33 /*!
34 * \brief Create a new object for using Pacemaker daemon IPC
35 *
36 * \param[out] api Where to store new IPC object
37 * \param[in] server Which Pacemaker daemon the object is for
38 *
39 * \return Standard Pacemaker result code
40 *
41 * \note The caller is responsible for freeing *api using pcmk_free_ipc_api().
42 * \note This is intended to supersede crm_ipc_new() but currently only
43 * supports the controller, pacemakerd, and schedulerd IPC API.
44 */
45 int
46 pcmk_new_ipc_api(pcmk_ipc_api_t **api, enum pcmk_ipc_server server)
47 {
|
(1) Event path: |
Condition "api == NULL", taking false branch. |
48 if (api == NULL) {
49 return EINVAL;
50 }
51
52 *api = calloc(1, sizeof(pcmk_ipc_api_t));
|
(2) Event path: |
Condition "*api == NULL", taking false branch. |
53 if (*api == NULL) {
54 return errno;
55 }
56
57 (*api)->server = server;
|
(3) Event path: |
Condition "pcmk_ipc_name(*api, false /* 0 */) == NULL", taking true branch. |
58 if (pcmk_ipc_name(*api, false) == NULL) {
|
CID (unavailable; MK=c32b336c85895d1850ba9e3fba83431d) (#1 of 4): Inconsistent C union access (INCONSISTENT_UNION_ACCESS): |
|
(4) Event assign_union_field: |
The union field "in" of "_pp" is written. |
|
(5) Event inconsistent_union_field_access: |
In "_pp.out", the union field used: "out" is inconsistent with the field most recently stored: "in". |
59 g_clear_pointer(api, pcmk_free_ipc_api);
60 return EOPNOTSUPP;
61 }
62
63 // Set server methods
64 switch (server) {
65 case pcmk_ipc_attrd:
66 (*api)->cmds = pcmk__attrd_api_methods();
67 break;
68
69 case pcmk_ipc_based:
70 break;
71
72 case pcmk_ipc_controld:
73 (*api)->cmds = pcmk__controld_api_methods();
74 break;
75
76 case pcmk_ipc_execd:
77 break;
78
79 case pcmk_ipc_fenced:
80 break;
81
82 case pcmk_ipc_pacemakerd:
83 (*api)->cmds = pcmk__pacemakerd_api_methods();
84 break;
85
86 case pcmk_ipc_schedulerd:
87 (*api)->cmds = pcmk__schedulerd_api_methods();
88 break;
89
90 default: // pcmk_ipc_unknown
91 g_clear_pointer(api, pcmk_free_ipc_api);
92 return EINVAL;
93 }
94 if ((*api)->cmds == NULL) {
95 g_clear_pointer(api, pcmk_free_ipc_api);
96 return ENOMEM;
97 }
98
99 (*api)->ipc = crm_ipc_new(pcmk_ipc_name(*api, false), 0);
100 if ((*api)->ipc == NULL) {
101 g_clear_pointer(api, pcmk_free_ipc_api);
102 return ENOMEM;
103 }
104
105 // If daemon API has its own data to track, allocate it
106 if (((*api)->cmds->new_data != NULL)
107 && ((*api)->cmds->new_data(*api) != pcmk_rc_ok)) {
108
109 g_clear_pointer(api, pcmk_free_ipc_api);
110 return ENOMEM;
111 }
112
113 pcmk__trace("Created %s API IPC object", pcmk_ipc_name(*api, true));
114 return pcmk_rc_ok;
115 }
116
117 static void
118 free_daemon_specific_data(pcmk_ipc_api_t *api)
119 {
120 if ((api != NULL) && (api->cmds != NULL)) {
121 if ((api->cmds->free_data != NULL) && (api->api_data != NULL)) {
122 g_clear_pointer(&api->api_data, api->cmds->free_data);
123 }
124
125 g_clear_pointer(&api->cmds, free);
126 }
127 }
128
/*!
 * \internal
 * \brief Call an IPC API event callback, if one is registered
 *
 * \param[in,out] api         IPC API connection
 * \param[in]     event_type  The type of event that occurred
 * \param[in]     status      Event status
 * \param[in,out] event_data  Event-specific data
 *
 * \note This is a no-op if \p api is NULL or no callback has been registered
 *       via pcmk_register_ipc_callback().
 */
void
pcmk__call_ipc_callback(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type,
                        crm_exit_t status, void *event_data)
{
    if ((api != NULL) && (api->cb != NULL)) {
        api->cb(api, event_type, status, event_data, api->user_data);
    }
}
146
/*!
 * \internal
 * \brief Clean up after an IPC disconnect
 *
 * \param[in,out] user_data  IPC API connection that disconnected
 *
 * \note This function can be used as a main loop IPC destroy callback.
 */
static void
ipc_post_disconnect(gpointer user_data)
{
    pcmk_ipc_api_t *api = user_data;

    pcmk__info("Disconnected from %s", pcmk_ipc_name(api, true));

    // Perform any daemon-specific handling needed
    if ((api->cmds != NULL) && (api->cmds->post_disconnect != NULL)) {
        api->cmds->post_disconnect(api);
    }

    // Call client's registered event callback
    pcmk__call_ipc_callback(api, pcmk_ipc_event_disconnect, CRM_EX_DISCONNECT,
                            NULL);

    /* If this is being called from a running main loop, mainloop_gio_destroy()
     * will free ipc and mainloop_io immediately after calling this function.
     * If this is called from a stopped main loop, these will leak, so the best
     * practice is to close the connection before stopping the main loop.
     */
    api->ipc = NULL;
    api->mainloop_io = NULL;

    // free_on_disconnect is set by pcmk_free_ipc_api() before disconnecting
    if (api->free_on_disconnect) {
        /* pcmk_free_ipc_api() has already been called, but did not free api
         * or api->cmds because this function needed them. Do that now.
         */
        free_daemon_specific_data(api);
        pcmk__trace("Freeing IPC API object after disconnect");
        free(api);
    }
}
188
/*!
 * \brief Free the contents of an IPC API object
 *
 * \param[in,out] api  IPC API object to free
 *
 * \note If the connection is attached to a still-running main loop, the
 *       object may actually be freed later, by ipc_post_disconnect(), once
 *       the destroy callback fires.
 */
void
pcmk_free_ipc_api(pcmk_ipc_api_t *api)
{
    bool free_on_disconnect = false;

    if (api == NULL) {
        return;
    }
    pcmk__debug("Releasing %s IPC API", pcmk_ipc_name(api, true));

    if (api->ipc != NULL) {
        if (api->mainloop_io != NULL) {
            /* We need to keep the api pointer itself around, because it is the
             * user data for the IPC client destroy callback. That will be
             * triggered by the pcmk_disconnect_ipc() call below, but it might
             * happen later in the main loop (if still running).
             *
             * This flag tells the destroy callback to free the object. It can't
             * do that unconditionally, because the application might call this
             * function after a disconnect that happened by other means.
             */
            free_on_disconnect = api->free_on_disconnect = true;
        }
        pcmk_disconnect_ipc(api); // Frees api if free_on_disconnect is true
    }
    if (!free_on_disconnect) {
        // Disconnect path did not (and will not) free api, so do it here
        free_daemon_specific_data(api);
        pcmk__trace("Freeing IPC API object");
        free(api);
    }
}
225
226 /*!
227 * \brief Get the IPC name used with an IPC API connection
228 *
229 * \param[in] api IPC API connection
230 * \param[in] for_log If true, return human-friendly name instead of IPC name
231 *
232 * \return IPC API's human-friendly or connection name, or if none is available,
233 * "Pacemaker" if for_log is true and NULL if for_log is false
234 */
235 const char *
236 pcmk_ipc_name(const pcmk_ipc_api_t *api, bool for_log)
237 {
238 if (api == NULL) {
239 return for_log? "Pacemaker" : NULL;
240 }
241 if (for_log) {
242 const char *name = pcmk__server_log_name(api->server);
243
244 return pcmk__s(name, "Pacemaker");
245 }
246 switch (api->server) {
247 // These servers do not have pcmk_ipc_api_t implementations yet
248 case pcmk_ipc_based:
249 case pcmk_ipc_execd:
250 case pcmk_ipc_fenced:
251 return NULL;
252
253 default:
254 return pcmk__server_ipc_name(api->server);
255 }
256 }
257
258 /*!
259 * \brief Check whether an IPC API connection is active
260 *
261 * \param[in,out] api IPC API connection
262 *
263 * \return true if IPC is connected, false otherwise
264 */
265 bool
266 pcmk_ipc_is_connected(pcmk_ipc_api_t *api)
267 {
268 return (api != NULL) && crm_ipc_connected(api->ipc);
269 }
270
271 /*!
272 * \internal
273 * \brief Call the daemon-specific API's dispatch function
274 *
275 * Perform daemon-specific handling of IPC reply dispatch. It is the daemon
276 * method's responsibility to call the client's registered event callback, as
277 * well as allocate and free any event data.
278 *
279 * \param[in,out] api IPC API connection
280 * \param[in,out] message IPC reply XML to dispatch
281 */
282 static bool
283 call_api_dispatch(pcmk_ipc_api_t *api, xmlNode *message)
284 {
285 pcmk__log_xml_trace(message, "ipc-received");
286 if ((api->cmds != NULL) && (api->cmds->dispatch != NULL)) {
287 return api->cmds->dispatch(api, message);
288 }
289
290 return false;
291 }
292
293 /*!
294 * \internal
295 * \brief Dispatch previously read IPC data
296 *
297 * \param[in] buffer Data read from IPC
298 * \param[in,out] api IPC object
299 *
300 * \return Standard Pacemaker return code. In particular:
301 *
302 * pcmk_rc_ok: There are no more messages expected from the server. Quit
303 * reading.
304 * EINPROGRESS: There are more messages expected from the server. Keep reading.
305 *
306 * All other values indicate an error.
307 */
308 static int
309 dispatch_ipc_data(const char *buffer, pcmk_ipc_api_t *api)
310 {
311 bool more = false;
312 xmlNode *msg;
313
314 if (buffer == NULL) {
315 pcmk__warn("Empty message received from %s IPC",
316 pcmk_ipc_name(api, true));
317 return ENOMSG;
318 }
319
320 msg = pcmk__xml_parse(buffer);
321 if (msg == NULL) {
322 pcmk__warn("Malformed message received from %s IPC",
323 pcmk_ipc_name(api, true));
324 return EPROTO;
325 }
326
327 more = call_api_dispatch(api, msg);
328 pcmk__xml_free(msg);
329
330 if (more) {
331 return EINPROGRESS;
332 } else {
333 return pcmk_rc_ok;
334 }
335 }
336
337 /*!
338 * \internal
339 * \brief Dispatch data read from IPC source
340 *
341 * \param[in] buffer Data read from IPC
342 * \param[in] length Number of bytes of data in buffer (ignored)
343 * \param[in,out] user_data IPC object
344 *
345 * \return Always 0 (meaning connection is still required)
346 *
347 * \note This function can be used as a main loop IPC dispatch callback.
348 */
349 static int
350 dispatch_ipc_source_data(const char *buffer, ssize_t length, gpointer user_data)
351 {
352 pcmk_ipc_api_t *api = user_data;
353
354 CRM_CHECK(api != NULL, return 0);
355 dispatch_ipc_data(buffer, api);
356 return 0;
357 }
358
359 /*!
360 * \brief Check whether an IPC connection has data available (without main loop)
361 *
362 * \param[in] api IPC API connection
363 * \param[in] timeout_ms If less than 0, poll indefinitely; if 0, poll once
364 * and return immediately; otherwise, poll for up to
365 * this many milliseconds
366 *
367 * \return Standard Pacemaker return code
368 *
369 * \note Callers of pcmk_connect_ipc() using pcmk_ipc_dispatch_poll should call
370 * this function to check whether IPC data is available. Return values of
371 * interest include pcmk_rc_ok meaning data is available, and EAGAIN
372 * meaning no data is available; all other values indicate errors.
373 * \todo This does not allow the caller to poll multiple file descriptors at
374 * once. If there is demand for that, we could add a wrapper for
375 * pcmk__ipc_fd(api->ipc), so the caller can call poll() themselves.
376 */
377 int
378 pcmk_poll_ipc(const pcmk_ipc_api_t *api, int timeout_ms)
379 {
380 int rc;
381 struct pollfd pollfd = { 0, };
382
383 if ((api == NULL) || (api->dispatch_type != pcmk_ipc_dispatch_poll)) {
384 return EINVAL;
385 }
386
387 rc = pcmk__ipc_fd(api->ipc, &(pollfd.fd));
388 if (rc != pcmk_rc_ok) {
389 pcmk__debug("Could not obtain file descriptor for %s IPC: %s",
390 pcmk_ipc_name(api, true), pcmk_rc_str(rc));
391 return rc;
392 }
393
394 pollfd.events = POLLIN;
395 rc = poll(&pollfd, 1, timeout_ms);
396 if (rc < 0) {
397 /* Some UNIX systems return negative and set EAGAIN for failure to
398 * allocate memory; standardize the return code in that case
399 */
400 return (errno == EAGAIN)? ENOMEM : errno;
401 } else if (rc == 0) {
402 return EAGAIN;
403 }
404 return pcmk_rc_ok;
405 }
406
/*!
 * \brief Dispatch available messages on an IPC connection (without main loop)
 *
 * \param[in,out] api  IPC API connection
 *
 * \note Callers of pcmk_connect_ipc() using pcmk_ipc_dispatch_poll should call
 *       this function when IPC data is available.
 * \note This function returns nothing; any per-message results are delivered
 *       through the registered event callback. (An earlier version of this
 *       comment incorrectly documented a return code.)
 */
void
pcmk_dispatch_ipc(pcmk_ipc_api_t *api)
{
    if (api == NULL) {
        return;
    }
    // Drain every message currently queued on the connection
    while (crm_ipc_ready(api->ipc) > 0) {
        if (crm_ipc_read(api->ipc) > 0) {
            dispatch_ipc_data(crm_ipc_buffer(api->ipc), api);
            pcmk__ipc_free_client_buffer(api->ipc);
        }
    }
}
430
431 // \return Standard Pacemaker return code
432 static int
433 connect_with_main_loop(pcmk_ipc_api_t *api)
434 {
435 int rc;
436
437 struct ipc_client_callbacks callbacks = {
438 .dispatch = dispatch_ipc_source_data,
439 .destroy = ipc_post_disconnect,
440 };
441
442 rc = pcmk__add_mainloop_ipc(api->ipc, G_PRIORITY_DEFAULT, api,
443 &callbacks, &(api->mainloop_io));
444 if (rc != pcmk_rc_ok) {
445 return rc;
446 }
447 pcmk__debug("Connected to %s IPC (attached to main loop)",
448 pcmk_ipc_name(api, true));
449 /* After this point, api->mainloop_io owns api->ipc, so api->ipc
450 * should not be explicitly freed.
451 */
452 return pcmk_rc_ok;
453 }
454
455 // \return Standard Pacemaker return code
456 static int
457 connect_without_main_loop(pcmk_ipc_api_t *api)
458 {
459 int rc = pcmk__connect_generic_ipc(api->ipc);
460
461 if (rc != pcmk_rc_ok) {
462 crm_ipc_close(api->ipc);
463 } else {
464 pcmk__debug("Connected to %s IPC (without main loop)",
465 pcmk_ipc_name(api, true));
466 }
467 return rc;
468 }
469
470 /*!
471 * \internal
472 * \brief Connect to a Pacemaker daemon via IPC (retrying after soft errors
473 * and ECONNREFUSED)
474 *
475 * \param[in,out] api IPC API instance
476 * \param[in] dispatch_type How IPC replies should be dispatched
477 * \param[in] attempts How many times to try (in case of soft error)
478 *
479 * \return Standard Pacemaker return code
480 */
481 int
482 pcmk__connect_ipc_retry_conrefused(pcmk_ipc_api_t *api,
483 enum pcmk_ipc_dispatch dispatch_type,
484 int attempts)
485 {
486 int remaining = attempts;
487 int rc = pcmk_rc_ok;
488
489 do {
490 if (rc == ECONNREFUSED) {
491 pcmk__sleep_ms((attempts - remaining) * 500);
492 }
493 rc = pcmk__connect_ipc(api, dispatch_type, remaining);
494 remaining--;
495 } while (rc == ECONNREFUSED && remaining >= 0);
496
497 return rc;
498 }
499
500
/*!
 * \internal
 * \brief Connect to a Pacemaker daemon via IPC (retrying after soft errors)
 *
 * \param[in,out] api            IPC API instance
 * \param[in]     dispatch_type  How IPC replies should be dispatched
 * \param[in]     attempts       How many times to try (in case of soft error)
 *
 * \return Standard Pacemaker return code
 */
int
pcmk__connect_ipc(pcmk_ipc_api_t *api, enum pcmk_ipc_dispatch dispatch_type,
                  int attempts)
{
    int rc = pcmk_rc_ok;

    if ((api == NULL) || (attempts < 1)) {
        return EINVAL;
    }

    // Create the IPC object lazily if a previous disconnect cleared it
    if (api->ipc == NULL) {
        api->ipc = crm_ipc_new(pcmk_ipc_name(api, false), 0);
        if (api->ipc == NULL) {
            return ENOMEM;
        }
    }

    if (crm_ipc_connected(api->ipc)) {
        pcmk__trace("Already connected to %s", pcmk_ipc_name(api, true));
        return pcmk_rc_ok;
    }

    api->dispatch_type = dispatch_type;

    pcmk__debug("Attempting connection to %s (up to %d time%s)",
                pcmk_ipc_name(api, true), attempts, pcmk__plural_s(attempts));
    for (int remaining = attempts - 1; remaining >= 0; --remaining) {
        switch (dispatch_type) {
            case pcmk_ipc_dispatch_main:
                rc = connect_with_main_loop(api);
                break;

            case pcmk_ipc_dispatch_sync:
            case pcmk_ipc_dispatch_poll:
                rc = connect_without_main_loop(api);
                break;
        }

        if ((remaining == 0) || ((rc != EAGAIN) && (rc != EALREADY))) {
            break; // Result is final
        }

        // Retry after soft error (interrupted by signal, etc.)
        pcmk__sleep_ms((attempts - remaining) * 500);
        pcmk__debug("Re-attempting connection to %s (%d attempt%s remaining)",
                    pcmk_ipc_name(api, true), remaining,
                    pcmk__plural_s(remaining));
    }

    if (rc != pcmk_rc_ok) {
        return rc;
    }

    // Give the daemon-specific API a chance to initialize the session
    if ((api->cmds != NULL) && (api->cmds->post_connect != NULL)) {
        rc = api->cmds->post_connect(api);
        if (rc != pcmk_rc_ok) {
            crm_ipc_close(api->ipc);
        }
    }
    return rc;
}
572
573 /*!
574 * \brief Connect to a Pacemaker daemon via IPC
575 *
576 * \param[in,out] api IPC API instance
577 * \param[in] dispatch_type How IPC replies should be dispatched
578 *
579 * \return Standard Pacemaker return code
580 */
581 int
582 pcmk_connect_ipc(pcmk_ipc_api_t *api, enum pcmk_ipc_dispatch dispatch_type)
583 {
584 int rc = pcmk__connect_ipc(api, dispatch_type, 2);
585
586 if (rc != pcmk_rc_ok) {
587 pcmk__err("Connection to %s failed: %s", pcmk_ipc_name(api, true),
588 pcmk_rc_str(rc));
589 }
590 return rc;
591 }
592
/*!
 * \brief Disconnect an IPC API instance
 *
 * \param[in,out] api  IPC API connection
 *
 * \note If the connection is attached to a main loop, this function should be
 *       called before quitting the main loop, to ensure that all memory is
 *       freed. (An earlier version of this comment incorrectly documented a
 *       return code; the function returns nothing.)
 * \note If api->free_on_disconnect was set by pcmk_free_ipc_api(), \p api
 *       itself may be freed before or during this call; callers must not use
 *       it afterward.
 */
void
pcmk_disconnect_ipc(pcmk_ipc_api_t *api)
{
    if ((api == NULL) || (api->ipc == NULL)) {
        return;
    }
    switch (api->dispatch_type) {
        case pcmk_ipc_dispatch_main:
            {
                mainloop_io_t *mainloop_io = api->mainloop_io;

                // Make sure no code with access to api can use these again
                api->mainloop_io = NULL;
                api->ipc = NULL;

                mainloop_del_ipc_client(mainloop_io);
                // After this point api might have already been freed
            }
            break;

        case pcmk_ipc_dispatch_poll:
        case pcmk_ipc_dispatch_sync:
            {
                crm_ipc_t *ipc = api->ipc;

                // Make sure no code with access to api can use ipc again
                api->ipc = NULL;

                // This should always be the case already, but to be safe
                api->free_on_disconnect = false;

                crm_ipc_close(ipc);
                crm_ipc_destroy(ipc);
                ipc_post_disconnect(api);
            }
            break;
    }
}
642
/*!
 * \brief Register a callback for IPC API events
 *
 * \param[in,out] api        IPC API connection
 * \param[in]     cb         Callback to register (may be NULL to unregister)
 * \param[in]     user_data  Caller data to pass to callback
 *
 * \note This function may be called multiple times to update the callback
 *       and/or user data. The caller remains responsible for freeing
 *       user_data in any case (after the IPC is disconnected, if the
 *       user data is still registered with the IPC).
 */
void
pcmk_register_ipc_callback(pcmk_ipc_api_t *api, pcmk_ipc_callback_t cb,
                           void *user_data)
{
    if (api == NULL) {
        return;
    }
    api->cb = cb;
    api->user_data = user_data;
}
665
/*!
 * \internal
 * \brief Send an XML request across an IPC API connection
 *
 * \param[in,out] api      IPC API connection
 * \param[in]     request  XML request to send
 *
 * \return Standard Pacemaker return code
 *
 * \note Daemon-specific IPC API functions should call this function to send
 *       requests, because it handles different dispatch types appropriately.
 */
int
pcmk__send_ipc_request(pcmk_ipc_api_t *api, const xmlNode *request)
{
    int rc;
    xmlNode *reply = NULL;
    enum crm_ipc_flags flags = crm_ipc_flags_none;

    if ((api == NULL) || (api->ipc == NULL) || (request == NULL)) {
        return EINVAL;
    }
    pcmk__log_xml_trace(request, "ipc-sent");

    // Synchronous dispatch requires waiting for a reply
    if ((api->dispatch_type == pcmk_ipc_dispatch_sync)
        && (api->cmds != NULL)
        && (api->cmds->reply_expected != NULL)
        && (api->cmds->reply_expected(api, request))) {
        flags = crm_ipc_client_response;
    }

    /* The 0 here means a default timeout of 5 seconds
     *
     * @TODO Maybe add a timeout_ms member to pcmk_ipc_api_t and a
     * pcmk_set_ipc_timeout() setter for it, then use it here.
     */
    rc = crm_ipc_send(api->ipc, request, flags, 0, &reply);

    if (rc < 0) {
        // crm_ipc_send() uses legacy (negative) return codes
        return pcmk_legacy2rc(rc);
    } else if (rc == 0) {
        return ENODATA;
    }

    // With synchronous dispatch, we dispatch any reply now
    if (reply != NULL) {
        bool more = call_api_dispatch(api, reply);

        pcmk__xml_free(reply);

        /* Keep reading and dispatching while the daemon API expects more
         * reply messages for this request
         */
        while (more) {
            rc = crm_ipc_read(api->ipc);

            if (rc == -EAGAIN) {
                continue;   // Nothing available yet; poll again
            } else if (rc == -ENOMSG || rc == pcmk_ok) {
                return pcmk_rc_ok;
            } else if (rc < 0) {
                return -rc; // Convert -errno to standard return code
            }

            rc = dispatch_ipc_data(crm_ipc_buffer(api->ipc), api);
            pcmk__ipc_free_client_buffer(api->ipc);

            if (rc == pcmk_rc_ok) {
                more = false;           // Final reply dispatched
            } else if (rc == EINPROGRESS) {
                more = true;            // Daemon has more replies coming
            } else {
                /* Dispatch error: keep reading rather than fail the request
                 * (NOTE(review): this leaves `more` unchanged — presumably
                 * intentional best-effort behavior; confirm)
                 */
                continue;
            }
        }
    }
    return pcmk_rc_ok;
}
742
/*!
 * \internal
 * \brief Create the XML for an IPC request to purge a node from the peer cache
 *
 * \param[in]  api        IPC API connection
 * \param[in]  node_name  If not NULL, name of node to purge
 * \param[in]  nodeid     If not 0, node ID of node to purge
 *
 * \return Newly allocated IPC request XML (or NULL if the server is
 *         unsupported)
 *
 * \note The controller, fencer, and pacemakerd use the same request syntax, but
 *       the attribute manager uses a different one. The CIB manager doesn't
 *       have any syntax for it. The executor and scheduler don't connect to the
 *       cluster layer and thus don't have or need any syntax for it.
 *
 * \todo Modify the attribute manager to accept the common syntax (as well
 *       as its current one, for compatibility with older clients). Modify
 *       the CIB manager to accept and honor the common syntax. Modify the
 *       executor and scheduler to accept the syntax (immediately returning
 *       success), just for consistency. Modify this function to use the
 *       common syntax with all daemons if their version supports it.
 */
static xmlNode *
create_purge_node_request(const pcmk_ipc_api_t *api, const char *node_name,
                          uint32_t nodeid)
{
    xmlNode *request = NULL;
    const char *client = crm_system_name? crm_system_name : "client";

    switch (api->server) {
        case pcmk_ipc_attrd:
            // Attribute manager has its own peer-removal request syntax
            request = pcmk__xe_create(NULL, __func__);
            pcmk__xe_set(request, PCMK__XA_T, PCMK__VALUE_ATTRD);
            pcmk__xe_set(request, PCMK__XA_SRC, crm_system_name);
            pcmk__xe_set(request, PCMK_XA_TASK, PCMK__ATTRD_CMD_PEER_REMOVE);
            pcmk__xe_set_bool(request, PCMK__XA_REAP, true);
            pcmk__xe_set(request, PCMK__XA_ATTR_HOST, node_name);
            if (nodeid > 0) {
                pcmk__xe_set_int(request, PCMK__XA_ATTR_HOST_ID, nodeid);
            }
            break;

        case pcmk_ipc_controld:
        case pcmk_ipc_fenced:
        case pcmk_ipc_pacemakerd:
            // These daemons share the common CRM_OP_RM_NODE_CACHE syntax
            request = pcmk__new_request(api->server, client, NULL,
                                        pcmk_ipc_name(api, false),
                                        CRM_OP_RM_NODE_CACHE, NULL);
            if (nodeid > 0) {
                pcmk__xe_set_ll(request, PCMK_XA_ID, (long long) nodeid);
            }
            pcmk__xe_set(request, PCMK_XA_UNAME, node_name);
            break;

        case pcmk_ipc_based:
        case pcmk_ipc_execd:
        case pcmk_ipc_schedulerd:
            // No purge syntax for these daemons (request stays NULL)
            break;

        default: // pcmk_ipc_unknown (shouldn't be possible)
            return NULL;
    }
    return request;
}
807
808 /*!
809 * \brief Ask a Pacemaker daemon to purge a node from its peer cache
810 *
811 * \param[in,out] api IPC API connection
812 * \param[in] node_name If not NULL, name of node to purge
813 * \param[in] nodeid If not 0, node ID of node to purge
814 *
815 * \return Standard Pacemaker return code
816 *
817 * \note At least one of node_name or nodeid must be specified.
818 */
819 int
820 pcmk_ipc_purge_node(pcmk_ipc_api_t *api, const char *node_name, uint32_t nodeid)
821 {
822 int rc = 0;
823 xmlNode *request = NULL;
824
825 if (api == NULL) {
826 return EINVAL;
827 }
828 if ((node_name == NULL) && (nodeid == 0)) {
829 return EINVAL;
830 }
831
832 request = create_purge_node_request(api, node_name, nodeid);
833 if (request == NULL) {
834 return EOPNOTSUPP;
835 }
836 rc = pcmk__send_ipc_request(api, request);
837 pcmk__xml_free(request);
838
839 pcmk__debug("%s peer cache purge of node %s[%" PRIu32 "]: rc=%d",
840 pcmk_ipc_name(api, true), pcmk__s(node_name, "(unnamed)"),
841 nodeid, rc);
842 return rc;
843 }
844
/*
 * Generic IPC API (to eventually be deprecated as public API and made internal)
 */

// Implementation of the (publicly opaque) crm_ipc_t client connection object
struct crm_ipc_s {
    struct pollfd pfd;          // Cached file descriptor and poll events
    int need_reply;             /* Cleared on connect; presumably tracks an
                                 * outstanding reply — confirm against senders
                                 */
    GByteArray *buffer;         // Received message being assembled (header + payload)
    char *server_name;          // server IPC name being connected to
    qb_ipcc_connection_t *ipc;  // Underlying libqb connection (NULL when closed)
};
856
857 /*!
858 * \brief Create a new (legacy) object for using Pacemaker daemon IPC
859 *
860 * \param[in] name IPC system name to connect to
861 * \param[in] max_size Use a maximum IPC buffer size of at least this size
862 *
863 * \return Newly allocated IPC object on success, NULL otherwise
864 *
865 * \note The caller is responsible for freeing the result using
866 * crm_ipc_destroy().
867 * \note This should be considered deprecated for use with daemons supported by
868 * pcmk_new_ipc_api().
869 * \note @COMPAT Since 3.0.1, \p max_size is ignored and the default given by
870 * \c crm_ipc_default_buffer_size() will be used instead.
871 */
872 crm_ipc_t *
873 crm_ipc_new(const char *name, size_t max_size)
874 {
875 crm_ipc_t *client = NULL;
876
877 client = calloc(1, sizeof(crm_ipc_t));
878 if (client == NULL) {
879 pcmk__err("Could not create IPC connection: %s", strerror(errno));
880 return NULL;
881 }
882
883 client->server_name = strdup(name);
884 if (client->server_name == NULL) {
885 pcmk__err("Could not create %s IPC connection: %s", name,
886 strerror(errno));
887 free(client);
888 return NULL;
889 }
890
891 client->buffer = NULL;
892 client->pfd.fd = -1;
893 client->pfd.events = POLLIN;
894 client->pfd.revents = 0;
895
896 return client;
897 }
898
/*!
 * \internal
 * \brief Connect a generic (not daemon-specific) IPC object
 *
 * \param[in,out] ipc  Generic IPC object to connect
 *
 * \return Standard Pacemaker return code
 *
 * \note On any failure after the libqb connection is established, the
 *       connection is closed again before returning.
 */
int
pcmk__connect_generic_ipc(crm_ipc_t *ipc)
{
    uid_t cl_uid = 0;
    gid_t cl_gid = 0;
    pid_t found_pid = 0;
    uid_t found_uid = 0;
    gid_t found_gid = 0;
    int rc = pcmk_rc_ok;

    if (ipc == NULL) {
        return EINVAL;
    }

    ipc->need_reply = FALSE;
    ipc->ipc = qb_ipcc_connect(ipc->server_name, crm_ipc_default_buffer_size());
    if (ipc->ipc == NULL) {
        return errno;   // qb_ipcc_connect() failure is reported via errno
    }

    // Cache the connection's descriptor for later polling
    rc = qb_ipcc_fd_get(ipc->ipc, &ipc->pfd.fd);
    if (rc < 0) { // -errno
        crm_ipc_close(ipc);
        return -rc;
    }

    // Expected server credentials are those of the Pacemaker daemon user
    rc = pcmk__daemon_user(&cl_uid, &cl_gid);
    if (rc != pcmk_rc_ok) {
        crm_ipc_close(ipc);
        return rc;
    }

    // Verify the peer on the other end is really a Pacemaker daemon
    rc = is_ipc_provider_expected(ipc->ipc, ipc->pfd.fd, cl_uid, cl_gid,
                                  &found_pid, &found_uid, &found_gid);
    if (rc != pcmk_rc_ok) {
        if (rc == pcmk_rc_ipc_unauthorized) {
            pcmk__info("%s IPC provider authentication failed: process %lld "
                       "has uid %lld (expected %lld) and gid %lld (expected "
                       "%lld)",
                       ipc->server_name,
                       (long long) PCMK__SPECIAL_PID_AS_0(found_pid),
                       (long long) found_uid, (long long) cl_uid,
                       (long long) found_gid, (long long) cl_gid);
        }
        crm_ipc_close(ipc);
        return rc;
    }

    return pcmk_rc_ok;
}
957
958 void
959 crm_ipc_close(crm_ipc_t * client)
960 {
961 if (client) {
962 if (client->ipc) {
963 qb_ipcc_connection_t *ipc = client->ipc;
964
965 client->ipc = NULL;
966 qb_ipcc_disconnect(ipc);
967 }
968 }
969 }
970
// Free a client IPC connection object created by crm_ipc_new()
void
crm_ipc_destroy(crm_ipc_t * client)
{
    if (client) {
        if (client->ipc && qb_ipcc_is_connected(client->ipc)) {
            pcmk__notice("Destroying active %s IPC connection",
                         client->server_name);
            /* The next line is basically unsafe
             *
             * If this connection was attached to mainloop and mainloop is active,
             * the 'disconnected' callback will end up back here and we'll end
             * up free'ing the memory twice - something that can still happen
             * even without this if we destroy a connection and it closes before
             * we call exit
             */
            /* crm_ipc_close(client); */
        } else {
            pcmk__trace("Destroying inactive %s IPC connection",
                        client->server_name);
        }

        // Release any partially assembled receive buffer
        if (client->buffer != NULL) {
            pcmk__ipc_free_client_buffer(client);
        }

        free(client->server_name);
        free(client);
    }
}
1000
1001 /*!
1002 * \internal
1003 * \brief Get the file descriptor for a generic IPC object
1004 *
1005 * \param[in,out] ipc Generic IPC object to get file descriptor for
1006 * \param[out] fd Where to store file descriptor
1007 *
1008 * \return Standard Pacemaker return code
1009 */
1010 int
1011 pcmk__ipc_fd(crm_ipc_t *ipc, int *fd)
1012 {
1013 if ((ipc == NULL) || (fd == NULL)) {
1014 return EINVAL;
1015 }
1016 if ((ipc->ipc == NULL) || (ipc->pfd.fd < 0)) {
1017 return ENOTCONN;
1018 }
1019 *fd = ipc->pfd.fd;
1020 return pcmk_rc_ok;
1021 }
1022
1023 int
1024 crm_ipc_get_fd(crm_ipc_t * client)
1025 {
1026 int fd = -1;
1027
1028 if (pcmk__ipc_fd(client, &fd) != pcmk_rc_ok) {
1029 pcmk__err("Could not obtain file descriptor for %s IPC",
1030 ((client == NULL)? "unspecified" : client->server_name));
1031 errno = EINVAL;
1032 return -EINVAL;
1033 }
1034 return fd;
1035 }
1036
1037 bool
1038 crm_ipc_connected(crm_ipc_t * client)
1039 {
1040 bool rc = FALSE;
1041
1042 if (client == NULL) {
1043 pcmk__trace("No client");
1044 return FALSE;
1045
1046 } else if (client->ipc == NULL) {
1047 pcmk__trace("No connection");
1048 return FALSE;
1049
1050 } else if (client->pfd.fd < 0) {
1051 pcmk__trace("Bad descriptor");
1052 return FALSE;
1053 }
1054
1055 rc = qb_ipcc_is_connected(client->ipc);
1056 if (rc == FALSE) {
1057 client->pfd.fd = -EINVAL;
1058 }
1059 return rc;
1060 }
1061
1062 /*!
1063 * \brief Check whether an IPC connection is ready to be read
1064 *
1065 * \param[in,out] client Connection to check
1066 *
1067 * \return Positive value if ready to be read, 0 if not ready, -errno on error
1068 */
1069 int
1070 crm_ipc_ready(crm_ipc_t *client)
1071 {
1072 int rc;
1073
1074 pcmk__assert(client != NULL);
1075
1076 if (!crm_ipc_connected(client)) {
1077 return -ENOTCONN;
1078 }
1079
1080 client->pfd.revents = 0;
1081 rc = poll(&(client->pfd), 1, 0);
1082 return (rc < 0)? -errno : rc;
1083 }
1084
/*!
 * \brief Read the next message from a client IPC connection
 *
 * Receive one or more IPC events and append them to the client's receive
 * buffer until a complete message has been assembled.
 *
 * \param[in,out] client  Connection to read from
 *
 * \return Size of the assembled message payload (excluding the IPC header)
 *         on success, or a negative legacy error code otherwise (-ENOTCONN,
 *         -EAGAIN, -EBADMSG, -ENOMSG, ...)
 */
long
crm_ipc_read(crm_ipc_t *client)
{
    guint8 *buffer = NULL;
    long rc = -ENOMSG;

    pcmk__assert((client != NULL) && (client->ipc != NULL));

    // Scratch buffer for a single IPC event; freed before returning
    buffer = g_malloc0(crm_ipc_default_buffer_size());

    do {
        pcmk__ipc_header_t *header = NULL;
        ssize_t bytes = qb_ipcc_event_recv(client->ipc, buffer,
                                           crm_ipc_default_buffer_size(), 0);

        // Each event starts with a Pacemaker IPC header
        header = (pcmk__ipc_header_t *)(void *) buffer;

        if (bytes <= 0) {
            pcmk__trace("No message received from %s IPC: %s",
                        client->server_name, strerror(-bytes));

            if (!crm_ipc_connected(client) || bytes == -ENOTCONN) {
                pcmk__err("Connection to %s IPC failed", client->server_name);
                rc = -ENOTCONN;
                pcmk__ipc_free_client_buffer(client);

            } else if (bytes == -EAGAIN) {
                rc = -EAGAIN;
            }

            goto done;
        }

        // Sanity-check the event against its header's declared size
        if (bytes != header->size + sizeof(pcmk__ipc_header_t)) {
            pcmk__err("Message size does not match header");
            rc = -EBADMSG;
            pcmk__ipc_free_client_buffer(client);
            goto done;
        }

        pcmk__trace("Received %s IPC event %" PRId32 " size=%" PRIu32 " rc=%zu",
                    client->server_name, header->qb.id, header->qb.size, bytes);

        /* Append this event to the client buffer; a message may span
         * multiple events (pcmk_rc_ipc_more)
         */
        rc = pcmk__ipc_msg_append(&client->buffer, buffer);

        if (rc == pcmk_rc_ok) {
            break;                  // Message complete
        } else if (rc == pcmk_rc_ipc_more) {
            continue;               // More events needed for this message
        } else {
            pcmk__ipc_free_client_buffer(client);
            rc = pcmk_rc2legacy(rc);
            goto done;
        }
    } while (true);

    if (client->buffer->len > 0) {
        /* Data length excluding the header */
        rc = client->buffer->len - sizeof(pcmk__ipc_header_t);
    }

done:
    g_free(buffer);
    return rc;
}
1149
1150 void
1151 pcmk__ipc_free_client_buffer(crm_ipc_t *client)
1152 {
1153 pcmk__assert(client != NULL);
1154
1155 if (client->buffer != NULL) {
1156 g_byte_array_free(client->buffer, TRUE);
1157 client->buffer = NULL;
1158 }
1159 }
1160
1161 const char *
1162 crm_ipc_buffer(crm_ipc_t * client)
1163 {
1164 pcmk__assert(client != NULL);
1165 CRM_CHECK(client->buffer != NULL, return NULL);
1166 return (const char *) (client->buffer->data + sizeof(pcmk__ipc_header_t));
1167 }
1168
1169 uint32_t
1170 crm_ipc_buffer_flags(crm_ipc_t * client)
1171 {
1172 pcmk__ipc_header_t *header = NULL;
1173
1174 pcmk__assert(client != NULL);
1175 if (client->buffer == NULL) {
1176 return 0;
1177 }
1178
1179 header = (pcmk__ipc_header_t *)(void*) client->buffer->data;
1180 return header->flags;
1181 }
1182
1183 const char *
1184 crm_ipc_name(crm_ipc_t * client)
1185 {
1186 pcmk__assert(client != NULL);
1187 return client->server_name;
1188 }
1189
/*!
 * \internal
 * \brief Wait for and collect the reply to a previously sent IPC request
 *
 * \param[in,out] client      IPC connection the request was sent on
 * \param[in]     request_id  libqb message ID of the request whose reply is
 *                            expected
 * \param[in]     ms_timeout  Give up after roughly this many milliseconds
 *                            (wait indefinitely if 0 or negative)
 * \param[out]    bytes       Where to store the result of the last
 *                            qb_ipcc_recv() call (bytes received, or a
 *                            negative errno)
 * \param[out]    reply       If not NULL, where to store parsed reply XML
 *
 * \return Standard Pacemaker return code
 */
static int
internal_ipc_get_reply(crm_ipc_t *client, int request_id, int ms_timeout,
                       ssize_t *bytes, xmlNode **reply)
{
    guint8 *buffer = NULL;
    pcmk__ipc_header_t *hdr = NULL;
    time_t timeout = 0;
    int32_t qb_timeout = -1;
    int rc = pcmk_rc_ok;
    int reply_id = 0;

    if (ms_timeout > 0) {
        // Overall deadline, polled in 1-second receive intervals
        timeout = time(NULL) + 1 + pcmk__timeout_ms2s(ms_timeout);
        qb_timeout = 1000;
    }

    /* get the reply */
    pcmk__trace("Expecting reply to %s IPC message %d", client->server_name,
                request_id);

    buffer = g_malloc0(crm_ipc_default_buffer_size());

    do {
        guint8 *data = NULL;
        xmlNode *xml = NULL;

        *bytes = qb_ipcc_recv(client->ipc, buffer, crm_ipc_default_buffer_size(),
                              qb_timeout);

        hdr = (pcmk__ipc_header_t *) (void *) buffer;

        if (*bytes <= 0) {
            if (!crm_ipc_connected(client)) {
                pcmk__err("%s IPC provider disconnected while waiting for "
                          "message %d",
                          client->server_name, request_id);
                break;
            }

            // Still connected; keep waiting until the deadline
            continue;

        } else if (*bytes != hdr->size + sizeof(pcmk__ipc_header_t)) {
            pcmk__err("Message size does not match header");
            *bytes = -EBADMSG;
            break;
        }

        reply_id = hdr->qb.id;

        if (reply_id == request_id) {
            /* Got the reply we were expecting. */
            rc = pcmk__ipc_msg_append(&client->buffer, buffer);

            if (rc == pcmk_rc_ok) {
                break;
            } else if (rc == pcmk_rc_ipc_more) {
                // Multipart reply; keep receiving the remaining parts
                continue;
            } else {
                goto done;
            }
        }

        /* Not the reply we wanted: parse it for logging, then discard it.
         * NOTE(review): xml from pcmk__xml_parse() does not appear to be
         * freed on these paths — possible leak; confirm against the XML
         * API's ownership rules.
         */
        data = buffer + sizeof(pcmk__ipc_header_t);
        xml = pcmk__xml_parse((const char *) data);

        if (reply_id < request_id) {
            pcmk__err("Discarding old reply %d (need %d)", reply_id,
                      request_id);
            pcmk__log_xml_notice(xml, "OldIpcReply");

        } else if (reply_id > request_id) {
            pcmk__err("Discarding newer reply %d (need %d)", reply_id,
                      request_id);
            pcmk__log_xml_notice(xml, "ImpossibleReply");
            pcmk__assert(hdr->qb.id <= request_id);
        }
    } while (time(NULL) < timeout || (timeout == 0 && *bytes == -EAGAIN));

    if (*bytes < 0) {
        rc = (int) -*bytes; // System errno
        pcmk__trace("%s reply to %s IPC %d: %s " QB_XS " rc=%d",
                    (client->buffer == NULL) ? "No" : "Incomplete",
                    client->server_name, request_id, pcmk_rc_str(rc), rc);
    } else if ((client->buffer != NULL) && (client->buffer->len > 0)) {
        pcmk__trace("Received %u-byte reply %d to %s IPC %d: %.100s",
                    client->buffer->len, reply_id, client->server_name,
                    request_id, crm_ipc_buffer(client));

        if (reply != NULL) {
            *reply = pcmk__xml_parse(crm_ipc_buffer(client));
        }
    }
    /* If bytes == 0, we'll return that to crm_ipc_send which will interpret
     * that as pcmk_rc_ok, log that the IPC request failed (since we did not
     * give it a valid reply), and return that 0 to its callers. It's up to
     * the callers to take appropriate action after that.
     */

    /* Once we've parsed the client buffer as XML and saved it to reply,
     * there's no need to keep the client buffer around anymore. Free it here
     * to avoid having to do this anywhere crm_ipc_send is called.
     */
done:
    pcmk__ipc_free_client_buffer(client);
    g_free(buffer);
    return rc;
}
1298
1299 static int
1300 discard_old_replies(crm_ipc_t *client, int32_t ms_timeout)
1301 {
1302 pcmk__ipc_header_t *header = NULL;
1303 int rc = pcmk_rc_ok;
1304 ssize_t qb_rc = 0;
1305 char *buffer = pcmk__assert_alloc(crm_ipc_default_buffer_size(),
1306 sizeof(char));
1307
1308 qb_rc = qb_ipcc_recv(client->ipc, buffer, crm_ipc_default_buffer_size(),
1309 ms_timeout);
1310
1311 if (qb_rc < 0) {
1312 pcmk__warn("Sending %s IPC disabled until pending reply received",
1313 client->server_name);
1314 rc = EALREADY;
1315 goto done;
1316 }
1317
1318 header = (pcmk__ipc_header_t *)(void *) buffer;
1319
1320 if (!pcmk__valid_ipc_header(header)) {
1321 rc = EBADMSG;
1322
1323 } else if (!pcmk__is_set(header->flags, crm_ipc_multipart)
1324 || pcmk__is_set(header->flags, crm_ipc_multipart_end)) {
1325
1326 pcmk__notice("Sending %s IPC re-enabled after pending reply received",
1327 client->server_name);
1328 client->need_reply = FALSE;
1329
1330 } else {
1331 pcmk__warn("Sending %s IPC disabled until multipart IPC message reply "
1332 "received", client->server_name);
1333 rc = EALREADY;
1334 }
1335
1336 done:
1337 free(buffer);
1338 return rc;
1339 }
1340
1341 /*!
1342 * \brief Send an IPC XML message
1343 *
1344 * \param[in,out] client Connection to IPC server
1345 * \param[in] message XML message to send
1346 * \param[in] flags Bitmask of crm_ipc_flags
1347 * \param[in] ms_timeout Give up if not sent within this much time
1348 * (5 seconds if 0, or no timeout if negative)
1349 * \param[out] reply Reply from server (or NULL if none)
1350 *
1351 * \return Negative errno on error, otherwise size of reply received in bytes
1352 * if reply was needed, otherwise number of bytes sent
1353 */
1354 int
1355 crm_ipc_send(crm_ipc_t *client, const xmlNode *message,
1356 enum crm_ipc_flags flags, int32_t ms_timeout, xmlNode **reply)
1357 {
1358 int rc = 0;
1359 ssize_t bytes = 0;
1360 ssize_t sent_bytes = 0;
1361 struct iovec *iov = NULL;
1362 static uint32_t id = 0;
1363 pcmk__ipc_header_t *header;
1364 GString *iov_buffer = NULL;
1365 uint16_t index = 0;
1366
1367 if (client == NULL) {
1368 pcmk__notice("Can't send IPC request without connection (bug?): %.100s",
1369 message);
1370 return -ENOTCONN;
1371
1372 } else if (!crm_ipc_connected(client)) {
1373 /* Don't even bother */
1374 pcmk__notice("Can't send %s IPC requests: Connection closed",
1375 client->server_name);
1376 return -ENOTCONN;
1377 }
1378
1379 if (ms_timeout == 0) {
1380 ms_timeout = 5000;
1381 }
1382
1383 /* This block exists only to clear out any old replies that we haven't
1384 * yet read. We don't care about their contents since it's too late to
1385 * do anything with them, so we just read and throw them away.
1386 */
1387 if (client->need_reply) {
1388 int discard_rc = discard_old_replies(client, ms_timeout);
1389
1390 if (discard_rc != pcmk_rc_ok) {
1391 return pcmk_rc2legacy(discard_rc);
1392 }
1393 }
1394
1395 id++;
1396 CRM_LOG_ASSERT(id != 0); /* Crude wrap-around detection */
1397
1398 iov_buffer = g_string_sized_new(1024);
1399 pcmk__xml_string(message, 0, iov_buffer, 0);
1400
1401 do {
1402 ssize_t qb_rc = 0;
1403 time_t timeout = 0;
1404
1405 rc = pcmk__ipc_prepare_iov(id, iov_buffer, index, &iov, &bytes);
1406
1407 if ((rc != pcmk_rc_ok) && (rc != pcmk_rc_ipc_more)) {
1408 pcmk__warn("Couldn't prepare %s IPC request: %s " QB_XS " rc=%d",
1409 client->server_name, pcmk_rc_str(rc), rc);
1410 g_string_free(iov_buffer, TRUE);
1411 return pcmk_rc2legacy(rc);
1412 }
1413
1414 header = iov[0].iov_base;
1415 pcmk__set_ipc_flags(header->flags, client->server_name, flags);
1416
1417 if (pcmk__is_set(flags, crm_ipc_proxied)) {
1418 /* Don't look for a synchronous response */
1419 pcmk__clear_ipc_flags(flags, "client", crm_ipc_client_response);
1420 }
1421
1422 if (pcmk__is_set(header->flags, crm_ipc_multipart)) {
1423 bool is_end = pcmk__is_set(header->flags, crm_ipc_multipart_end);
1424
1425 pcmk__trace("Sending %s IPC request %" PRId32 " "
1426 "(%spart %" PRIu16 ") of %" PRId32 " bytes "
1427 "using %dms timeout",
1428 client->server_name, header->qb.id,
1429 (is_end ? "final " : ""), index, header->qb.size,
1430 ms_timeout);
1431 pcmk__trace("Text = %s", (char *) iov[1].iov_base);
1432
1433 } else {
1434 pcmk__trace("Sending %s IPC request %" PRId32 " "
1435 "of %" PRId32 " bytes using %dms timeout",
1436 client->server_name, header->qb.id, header->qb.size,
1437 ms_timeout);
1438 pcmk__trace("Text = %s", (char *) iov[1].iov_base);
1439 }
1440
1441 /* Send the IPC request, respecting any timeout we were passed */
1442 if (ms_timeout > 0) {
1443 timeout = time(NULL) + 1 + pcmk__timeout_ms2s(ms_timeout);
1444 }
1445
1446 do {
1447 qb_rc = qb_ipcc_sendv(client->ipc, iov, 2);
1448 } while ((qb_rc == -EAGAIN) && ((timeout == 0) || (time(NULL) < timeout)));
1449
1450 /* An error occurred when sending. */
1451 if (qb_rc <= 0) {
1452 rc = (int) qb_rc; // Negative of system errno
1453 goto send_cleanup;
1454 }
1455
1456 /* Sending succeeded. The next action depends on whether this was a
1457 * multipart IPC message or not.
1458 */
1459 if (rc == pcmk_rc_ok) {
1460 /* This was either a standalone IPC message or the last part of
1461 * a multipart message. Set the return value and break out of
1462 * this processing loop.
1463 */
1464 sent_bytes += qb_rc;
1465 rc = (int) sent_bytes;
1466 break;
1467 } else {
1468 /* There's no way to get here for any value other than rc == pcmk_rc_more
1469 * given the check right after pcmk__ipc_prepare_iov.
1470 *
1471 * This was a multipart message, loop to process the next chunk.
1472 */
1473 sent_bytes += qb_rc;
1474 index++;
1475 }
1476
1477 g_clear_pointer(&iov, pcmk_free_ipc_event);
1478 } while (true);
1479
1480 /* If we should not wait for a response, bail now */
1481 if (!pcmk__is_set(flags, crm_ipc_client_response)) {
1482 pcmk__trace("Not waiting for reply to %s IPC request %d",
1483 client->server_name, header->qb.id);
1484 goto send_cleanup;
1485 }
1486
1487 pcmk__ipc_free_client_buffer(client);
1488 rc = internal_ipc_get_reply(client, header->qb.id, ms_timeout, &bytes, reply);
1489 if (rc == pcmk_rc_ok) {
1490 rc = (int) bytes; // Size of reply received
1491 } else {
1492 /* rc is either a positive system errno or a negative standard Pacemaker
1493 * return code. If it's an errno, we need to convert it back to a
1494 * negative number for comparison and return at the end of this function.
1495 */
1496 rc = pcmk_rc2legacy(rc);
1497
1498 if (ms_timeout > 0) {
1499 /* We didn't get the reply in time, so disable future sends for now.
1500 * The only alternative would be to close the connection since we
1501 * don't know how to detect and discard out-of-sequence replies.
1502 *
1503 * @TODO Implement out-of-sequence detection
1504 */
1505 client->need_reply = TRUE;
1506 }
1507 }
1508
1509 send_cleanup:
1510 if (!crm_ipc_connected(client)) {
1511 pcmk__notice("Couldn't send %s IPC request %d: Connection closed "
1512 QB_XS " rc=%d",
1513 client->server_name, header->qb.id, rc);
1514
1515 } else if (rc == -ETIMEDOUT) {
1516 pcmk__warn("%s IPC request %d failed: %s after %dms " QB_XS " rc=%d",
1517 client->server_name, header->qb.id, pcmk_strerror(rc),
1518 ms_timeout, rc);
1519 crm_write_blackbox(0, NULL);
1520
1521 } else if (rc <= 0) {
1522 pcmk__warn("%s IPC request %d failed: %s " QB_XS " rc=%d",
1523 client->server_name, header->qb.id,
1524 ((rc == 0)? "No bytes sent" : pcmk_strerror(rc)), rc);
1525 }
1526
1527 g_string_free(iov_buffer, TRUE);
1528 pcmk_free_ipc_event(iov);
1529 // coverity[return_overflow]
1530 return rc;
1531 }
1532
1533 /*!
1534 * \brief Ensure an IPC provider has expected user or group
1535 *
1536 * \param[in] qb_ipc libqb client connection if available
1537 * \param[in] sock Connected Unix socket for IPC
1538 * \param[in] refuid Expected user ID
1539 * \param[in] refgid Expected group ID
1540 * \param[out] gotpid If not NULL, where to store provider's actual process ID
1541 * (or 1 on platforms where ID is not available)
1542 * \param[out] gotuid If not NULL, where to store provider's actual user ID
1543 * \param[out] gotgid If not NULL, where to store provider's actual group ID
1544 *
1545 * \return Standard Pacemaker return code
1546 * \note An actual user ID of 0 (root) will always be considered authorized,
1547 * regardless of the expected values provided. The caller can use the
1548 * output arguments to be stricter than this function.
1549 */
1550 static int
1551 is_ipc_provider_expected(qb_ipcc_connection_t *qb_ipc, int sock,
1552 uid_t refuid, gid_t refgid,
1553 pid_t *gotpid, uid_t *gotuid, gid_t *gotgid)
1554 {
1555 int rc = EOPNOTSUPP;
1556 pid_t found_pid = 0;
1557 uid_t found_uid = 0;
1558 gid_t found_gid = 0;
1559
1560 #ifdef HAVE_QB_IPCC_AUTH_GET
1561 if (qb_ipc != NULL) {
1562 rc = qb_ipcc_auth_get(qb_ipc, &found_pid, &found_uid, &found_gid);
1563 rc = -rc; // libqb returns 0 or -errno
1564 if (rc == pcmk_rc_ok) {
1565 goto found;
1566 }
1567 }
1568 #endif
1569
1570 #ifdef HAVE_UCRED
1571 {
1572 struct ucred ucred;
1573 socklen_t ucred_len = sizeof(ucred);
1574
1575 if (getsockopt(sock, SOL_SOCKET, SO_PEERCRED, &ucred, &ucred_len) < 0) {
1576 rc = errno;
1577 } else if (ucred_len != sizeof(ucred)) {
1578 rc = EOPNOTSUPP;
1579 } else {
1580 found_pid = ucred.pid;
1581 found_uid = ucred.uid;
1582 found_gid = ucred.gid;
1583 goto found;
1584 }
1585 }
1586 #endif
1587
1588 #ifdef HAVE_SOCKPEERCRED
1589 {
1590 struct sockpeercred sockpeercred;
1591 socklen_t sockpeercred_len = sizeof(sockpeercred);
1592
1593 if (getsockopt(sock, SOL_SOCKET, SO_PEERCRED,
1594 &sockpeercred, &sockpeercred_len) < 0) {
1595 rc = errno;
1596 } else if (sockpeercred_len != sizeof(sockpeercred)) {
1597 rc = EOPNOTSUPP;
1598 } else {
1599 found_pid = sockpeercred.pid;
1600 found_uid = sockpeercred.uid;
1601 found_gid = sockpeercred.gid;
1602 goto found;
1603 }
1604 }
1605 #endif
1606
1607 #ifdef HAVE_GETPEEREID // For example, FreeBSD
1608 if (getpeereid(sock, &found_uid, &found_gid) < 0) {
1609 rc = errno;
1610 } else {
1611 found_pid = PCMK__SPECIAL_PID;
1612 goto found;
1613 }
1614 #endif
1615
1616 #ifdef HAVE_GETPEERUCRED
1617 {
1618 ucred_t *ucred = NULL;
1619
1620 if (getpeerucred(sock, &ucred) < 0) {
1621 rc = errno;
1622 } else {
1623 found_pid = ucred_getpid(ucred);
1624 found_uid = ucred_geteuid(ucred);
1625 found_gid = ucred_getegid(ucred);
1626 ucred_free(ucred);
1627 goto found;
1628 }
1629 }
1630 #endif
1631
1632 return rc; // If we get here, nothing succeeded
1633
1634 found:
1635 if (gotpid != NULL) {
1636 *gotpid = found_pid;
1637 }
1638 if (gotuid != NULL) {
1639 *gotuid = found_uid;
1640 }
1641 if (gotgid != NULL) {
1642 *gotgid = found_gid;
1643 }
1644 if ((found_uid != 0) && (found_uid != refuid) && (found_gid != refgid)) {
1645 return pcmk_rc_ipc_unauthorized;
1646 }
1647 return pcmk_rc_ok;
1648 }
1649
1650 int
1651 crm_ipc_is_authentic_process(int sock, uid_t refuid, gid_t refgid,
1652 pid_t *gotpid, uid_t *gotuid, gid_t *gotgid)
1653 {
1654 int ret = is_ipc_provider_expected(NULL, sock, refuid, refgid,
1655 gotpid, gotuid, gotgid);
1656
1657 /* The old function had some very odd return codes*/
1658 if (ret == 0) {
1659 return 1;
1660 } else if (ret == pcmk_rc_ipc_unauthorized) {
1661 return 0;
1662 } else {
1663 return pcmk_rc2legacy(ret);
1664 }
1665 }
1666
/*!
 * \internal
 * \brief Check whether an IPC server is running with expected credentials
 *
 * \param[in]  name    IPC server name to connect to
 * \param[in]  refuid  Expected user ID of the server process
 * \param[in]  refgid  Expected group ID of the server process
 * \param[out] gotpid  If not NULL, where to store server's actual process ID
 *
 * \return Standard Pacemaker return code (pcmk_rc_ipc_unresponsive if no
 *         connection could be established, pcmk_rc_ipc_unauthorized if the
 *         server's credentials were rejected)
 */
int
pcmk__ipc_is_authentic_process_active(const char *name, uid_t refuid,
                                      gid_t refgid, pid_t *gotpid)
{
    static char last_asked_name[PATH_MAX / 2] = ""; /* log spam prevention */
    int fd;
    int rc = pcmk_rc_ipc_unresponsive;
    int auth_rc = 0;
    int32_t qb_rc;
    pid_t found_pid = 0; uid_t found_uid = 0; gid_t found_gid = 0;
    qb_ipcc_connection_t *c;
#ifdef HAVE_QB_IPCC_CONNECT_ASYNC
    struct pollfd pollfd = { 0, };
    int poll_rc;

    c = qb_ipcc_connect_async(name, 0,
                              &(pollfd.fd));
#else
    c = qb_ipcc_connect(name, 0);
#endif
    if (c == NULL) {
        pcmk__info("Could not connect to %s IPC: %s", name, strerror(errno));
        rc = pcmk_rc_ipc_unresponsive;
        goto bail;
    }
#ifdef HAVE_QB_IPCC_CONNECT_ASYNC
    // Wait (up to 5s) for the asynchronous connection to become ready
    pollfd.events = POLLIN;
    do {
        poll_rc = poll(&pollfd, 1, 5000);
    } while ((poll_rc == -1) && (errno == EINTR));

    /* If poll() failed, given that disconnect function is not registered yet,
     * qb_ipcc_disconnect() won't clean up the socket. In any case, call
     * qb_ipcc_connect_continue() here so that it may fail and do the cleanup
     * for us.
     */
    if (qb_ipcc_connect_continue(c) != 0) {
        pcmk__info("Could not connect to %s IPC: %s", name,
                   ((poll_rc == 0)? "timeout" :strerror(errno)));
        rc = pcmk_rc_ipc_unresponsive;
        c = NULL; // qb_ipcc_connect_continue cleaned up for us
        goto bail;
    }
#endif

    // Need the raw socket descriptor for credential checks
    qb_rc = qb_ipcc_fd_get(c, &fd);
    if (qb_rc != 0) {
        rc = (int) -qb_rc; // System errno
        pcmk__err("Could not get fd from %s IPC: %s " QB_XS " rc=%d",
                  name, pcmk_rc_str(rc), rc);
        goto bail;
    }

    auth_rc = is_ipc_provider_expected(c, fd, refuid, refgid,
                                       &found_pid, &found_uid, &found_gid);
    if (auth_rc == pcmk_rc_ipc_unauthorized) {
        pcmk__err("Daemon (IPC %s) effectively blocked with unauthorized "
                  "process %lld (uid: %lld, gid: %lld)",
                  name, (long long) PCMK__SPECIAL_PID_AS_0(found_pid),
                  (long long) found_uid, (long long) found_gid);
        rc = pcmk_rc_ipc_unauthorized;
        goto bail;
    }

    if (auth_rc != pcmk_rc_ok) {
        rc = auth_rc;
        pcmk__err("Could not get peer credentials from %s IPC: %s "
                  QB_XS " rc=%d",
                  name, pcmk_rc_str(rc), rc);
        goto bail;
    }

    if (gotpid != NULL) {
        *gotpid = found_pid;
    }

    rc = pcmk_rc_ok;

    /* Warn (once per server name) about credential mismatches that were
     * authorized anyway (e.g. root, or only one of uid/gid matched)
     */
    if (((found_uid != refuid) || (found_gid != refgid))
            && !pcmk__str_eq(name, last_asked_name, pcmk__str_none)) {

        if ((found_uid == 0) && (refuid != 0)) {
            pcmk__warn("Daemon (IPC %s) runs as root, whereas the expected "
                       "credentials are %lld:%lld, hazard of violating the "
                       "least privilege principle",
                       name, (long long) refuid, (long long) refgid);
        } else {
            pcmk__notice("Daemon (IPC %s) runs as %lld:%lld, whereas the "
                         "expected credentials are %lld:%lld, which may "
                         "mean a different set of privileges than expected",
                         name, (long long) found_uid, (long long) found_gid,
                         (long long) refuid, (long long) refgid);
        }
        memccpy(last_asked_name, name, '\0', sizeof(last_asked_name));
    }

bail:
    if (c != NULL) {
        qb_ipcc_disconnect(c);
    }
    return rc;
}
1768
1769 // Deprecated functions kept only for backward API compatibility
1770 // LCOV_EXCL_START
1771
1772 #include <crm/common/ipc_client_compat.h>
1773
1774 bool
1775 crm_ipc_connect(crm_ipc_t *client)
1776 {
1777 int rc = pcmk__connect_generic_ipc(client);
1778
1779 if (rc == pcmk_rc_ok) {
1780 return true;
1781 }
1782 if ((client != NULL) && (client->ipc == NULL)) {
1783 errno = (rc > 0)? rc : ENOTCONN;
1784 pcmk__debug("Could not establish %s IPC connection: %s (%d)",
1785 client->server_name, pcmk_rc_str(errno), errno);
1786 } else if (rc == pcmk_rc_ipc_unauthorized) {
1787 pcmk__err("%s IPC provider authentication failed",
1788 (client == NULL)? "Pacemaker" : client->server_name);
1789 errno = ECONNABORTED;
1790 } else {
1791 pcmk__err("Could not verify authenticity of %s IPC provider",
1792 (client == NULL)? "Pacemaker" : client->server_name);
1793 errno = ENOTCONN;
1794 }
1795 return false;
1796 }
1797
1798 // LCOV_EXCL_STOP
1799 // End deprecated API
1800