/*
 * Copyright 2012-2026 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU Lesser General Public License
 * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>

#include <errno.h>                      // EREMOTEIO, ENOMEM
#include <stdint.h>                     // int32_t, uint32_t
#include <stdlib.h>                     // NULL, free, size_t
#include <sys/types.h>                  // gid_t, uid_t

#include <glib.h>                       // g_byte_array_free, g_list_*
#include <libxml/tree.h>                // xmlNode
#include <qb/qbipcs.h>                  // qb_ipcs_connection_t
#include <qb/qblog.h>                   // QB_XS

#include <crm/common/internal.h>
#include <crm/common/ipc.h>             // crm_ipc_flags
#include <crm/common/logging.h>         // CRM_CHECK, CRM_LOG_ASSERT
#include <crm/common/results.h>         // pcmk_rc_*, pcmk_rc_str
#include <crm/crm.h>                    // CRM_SYSTEM_CRMD
#include <crm/lrmd.h>                   // LRMD_IPC_OP_DESTROY

#include "pacemaker-execd.h"            // lrmd_server_send_notify

static qb_ipcs_service_t *cib_ro = NULL;
static qb_ipcs_service_t *cib_rw = NULL;
static qb_ipcs_service_t *cib_shm = NULL;

static qb_ipcs_service_t *attrd_ipcs = NULL;
static qb_ipcs_service_t *crmd_ipcs = NULL;
static qb_ipcs_service_t *fencer_ipcs = NULL;
static qb_ipcs_service_t *pacemakerd_ipcs = NULL;

// An IPC provider is a cluster node controller connecting as a client
static GList *ipc_providers = NULL;

/* IPC clients are local IPC connections from tools such as cibadmin and
 * crm_resource
 *
 * @TODO This should be unnecessary (pcmk__foreach_ipc_client() should be
 * sufficient)
 */
static GHashTable *ipc_clients = NULL;
/*!
 * \internal
 * \brief Get an IPC proxy provider
 *
 * \return Pointer to a provider if one exists, NULL otherwise
 *
 * \note Grab the first provider, which is the most recent connection. That way,
 *       if we haven't yet timed out an old, failed connection, we don't try to
 *       use it.
 */
pcmk__client_t *
ipc_proxy_get_provider(void)
{
    return ipc_providers? (pcmk__client_t *) (ipc_providers->data) : NULL;
}

/*!
 * \internal
 * \brief Accept a client connection on a proxy IPC server
 *
 * \param[in,out] c            New connection
 * \param[in]     uid          Client user id
 * \param[in]     gid          Client group id
 * \param[in]     ipc_channel  Name of IPC server to proxy
 *
 * \return 0 on success, -errno on error
 */
static int32_t
ipc_proxy_accept(qb_ipcs_connection_t *c, uid_t uid, gid_t gid, const char *ipc_channel)
{
    pcmk__client_t *client = NULL;
    pcmk__client_t *ipc_proxy = ipc_proxy_get_provider();
    xmlNode *msg = NULL;

    if (ipc_proxy == NULL) {
        pcmk__warn("Cannot proxy IPC connection from uid %d gid %d to %s "
                   "because not connected to cluster",
                   uid, gid, ipc_channel);
        return -EREMOTEIO;
    }

    /* This new client is a local IPC client on a Pacemaker Remote controlled
     * node, needing to access cluster node IPC services.
     */
    client = pcmk__new_client(c, uid, gid);
    if (client == NULL) {
        return -ENOMEM;
    }

    /* This IPC client is bound to a single IPC provider. If the provider goes
     * away, this client is disconnected.
     */
    client->userdata = pcmk__str_copy(ipc_proxy->id);
    client->name = pcmk__assert_asprintf("proxy-%s-%d-%.8s", ipc_channel,
                                         client->pid, client->id);

    /* Allow remote executor to distinguish between proxied local clients and
     * actual executor API clients
     */
    pcmk__set_client_flags(client, pcmk__client_to_proxy);

    g_hash_table_insert(ipc_clients, client->id, client);

    msg = pcmk__xe_create(NULL, PCMK__XE_LRMD_IPC_PROXY);
    pcmk__xe_set(msg, PCMK__XA_LRMD_IPC_OP, LRMD_IPC_OP_NEW);
    pcmk__xe_set(msg, PCMK__XA_LRMD_IPC_SERVER, ipc_channel);
    pcmk__xe_set(msg, PCMK__XA_LRMD_IPC_SESSION, client->id);
    lrmd_server_send_notify(ipc_proxy, msg);
    pcmk__xml_free(msg);
    pcmk__debug("Accepted IPC proxy connection (session ID %s) from uid %d "
                "gid %d on channel %s",
                client->id, uid, gid, ipc_channel);
    return 0;
}

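// Accept callbacks for each IPC server that can be proxied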
static int32_t
crmd_proxy_accept(qb_ipcs_connection_t *c, uid_t uid, gid_t gid)
{
    return ipc_proxy_accept(c, uid, gid, CRM_SYSTEM_CRMD);
}

static int32_t
attrd_proxy_accept(qb_ipcs_connection_t *c, uid_t uid, gid_t gid)
{
    return ipc_proxy_accept(c, uid, gid, PCMK__VALUE_ATTRD);
}

static int32_t
fencer_proxy_accept(qb_ipcs_connection_t *c, uid_t uid, gid_t gid)
{
    return ipc_proxy_accept(c, uid, gid, "stonith-ng");
}

static int32_t
pacemakerd_proxy_accept(qb_ipcs_connection_t *c, uid_t uid, gid_t gid)
{
    // Proxying connections to pacemakerd is not supported
    return -EREMOTEIO;
}

static int32_t
cib_proxy_accept_rw(qb_ipcs_connection_t *c, uid_t uid, gid_t gid)
{
    return ipc_proxy_accept(c, uid, gid, PCMK__SERVER_BASED_RW);
}

static int32_t
cib_proxy_accept_ro(qb_ipcs_connection_t *c, uid_t uid, gid_t gid)
{
    return ipc_proxy_accept(c, uid, gid, PCMK__SERVER_BASED_RO);
}

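/*!
 * \internal
 * \brief Forward a proxy message from an IPC provider to a local IPC client
 *
 * \param[in,out] ipc_proxy  IPC provider (cluster node controller) connection
 * \param[in,out] xml        Proxy message from the provider
 *
 * \return Standard Pacemaker return code
 */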
int
ipc_proxy_forward_client(pcmk__client_t *ipc_proxy, xmlNode *xml)
{
    const char *session = pcmk__xe_get(xml, PCMK__XA_LRMD_IPC_SESSION);
    const char *msg_type = pcmk__xe_get(xml, PCMK__XA_LRMD_IPC_OP);

    xmlNode *wrapper = pcmk__xe_first_child(xml, PCMK__XE_LRMD_IPC_MSG, NULL,
                                            NULL);
    xmlNode *msg = pcmk__xe_first_child(wrapper, NULL, NULL, NULL);

    pcmk__client_t *ipc_client = NULL;
    int rc = pcmk_rc_ok;

    if (pcmk__str_eq(msg_type, LRMD_IPC_OP_SHUTDOWN_ACK, pcmk__str_casei)) {
        handle_shutdown_ack();
        return rc;
    }

    if (pcmk__str_eq(msg_type, LRMD_IPC_OP_SHUTDOWN_NACK, pcmk__str_casei)) {
        handle_shutdown_nack();
        return rc;
    }

    ipc_client = pcmk__find_client_by_id(session);
    if (ipc_client == NULL) {
        xmlNode *msg = pcmk__xe_create(NULL, PCMK__XE_LRMD_IPC_PROXY);
        pcmk__xe_set(msg, PCMK__XA_LRMD_IPC_OP, LRMD_IPC_OP_DESTROY);
        pcmk__xe_set(msg, PCMK__XA_LRMD_IPC_SESSION, session);
        lrmd_server_send_notify(ipc_proxy, msg);
        pcmk__xml_free(msg);
        return rc;
    }

    /* This is an event or response from the IPC provider going to the local
     * IPC client. The chain of events looks like this:
     *
     * -----remote node----------------|---- cluster node ------
     * ipc_client <--1--> this code
     *    <--2--> pacemaker-controld:controld_remote_proxy_cb
     *            /remote_proxy_relay_event()
     *    <--3--> ipc server
     *
     * This function receives a message over connection 2 and forwards it over
     * connection 1.
     */

    if (pcmk__str_eq(msg_type, LRMD_IPC_OP_EVENT, pcmk__str_casei)) {
        pcmk__trace("Sending event to %s", ipc_client->id);
        rc = pcmk__ipc_send_xml(ipc_client, 0, msg, crm_ipc_server_event);

    } else if (pcmk__str_eq(msg_type, LRMD_IPC_OP_RESPONSE, pcmk__str_casei)) {
        int msg_id = 0;

        pcmk__xe_get_int(xml, PCMK__XA_LRMD_IPC_MSG_ID, &msg_id);

        pcmk__trace("Sending response to %d - %s", ipc_client->request_id,
                    ipc_client->id);
        rc = pcmk__ipc_send_xml(ipc_client, msg_id, msg, crm_ipc_flags_none);

        CRM_LOG_ASSERT(msg_id == ipc_client->request_id);
        ipc_client->request_id = 0;

    } else if (pcmk__str_eq(msg_type, LRMD_IPC_OP_DESTROY, pcmk__str_casei)) {
        qb_ipcs_disconnect(ipc_client->ipcs);

    } else {
        pcmk__err("Unknown IPC proxy message type %s", msg_type);
    }

    if (rc != pcmk_rc_ok) {
        pcmk__warn("Could not proxy IPC to client %s: %s " QB_XS " rc=%d",
                   ipc_client->id, pcmk_rc_str(rc), rc);
    }

    return rc;
}

/*!
 * \internal
 * \brief Handle a message from an IPC connection
 *
 * \param[in,out] c     Established IPC connection
 * \param[in]     data  Message data read from the connection (may be a
 *                      complete IPC message or just part of one if it's very
 *                      large)
 * \param[in]     size  Unused
 *
 * \return 0 in all cases
 */
static int32_t
ipc_proxy_dispatch(qb_ipcs_connection_t *c, void *data, size_t size)
{
    int rc = pcmk_rc_ok;
    uint32_t id = 0;
    uint32_t flags = 0;
    pcmk__client_t *client = pcmk__find_client(c);
    pcmk__client_t *ipc_proxy = NULL;
    xmlNode *wrapper = NULL;
    xmlNode *request = NULL;
    xmlNode *msg = NULL;

    // Sanity-check, and parse XML from IPC data
    CRM_CHECK(client != NULL, return 0);
    if (data == NULL) {
        pcmk__debug("No IPC data from PID %d", pcmk__client_pid(c));
        return 0;
    }

    ipc_proxy = pcmk__find_client_by_id(client->userdata);
    if (ipc_proxy == NULL) {
        qb_ipcs_disconnect(client->ipcs);
        return 0;
    }

    /* This is a request from the local IPC client going to the IPC provider.
     * The chain of events looks like this:
     *
     * -----remote node----------------|---- cluster node ------
     * ipc_client <--1--> this code
     *     <--2--> pacemaker-controld:remote_proxy_dispatch_internal()
     *     <--3--> ipc server
     *
     * This function receives a request over connection 1 and forwards it over
     * connection 2.
     */
    rc = pcmk__ipc_msg_append(&client->buffer, data);

    if (rc == pcmk_rc_ipc_more) {
        /* We haven't read the complete message yet, so just return. */
        return 0;

    } else if (rc == pcmk_rc_ok) {
        /* We've read the complete message and there's already a header on
         * the front.  Pass it off for processing.
         */
        request = pcmk__client_data2xml(client, &id, &flags);
        g_byte_array_free(client->buffer, TRUE);
        client->buffer = NULL;

    } else {
        /* Some sort of error occurred reassembling the message.  All we can
         * do is clean up, log an error and return.
         */
        pcmk__err("Error when reading IPC message: %s", pcmk_rc_str(rc));

        if (client->buffer != NULL) {
            g_byte_array_free(client->buffer, TRUE);
            client->buffer = NULL;
        }

        return 0;
    }

    if (request == NULL) {
        return 0;
    }

    /* This ensures that synchronous requests/responses happen over the event
     * channel in the controller, allowing the controller to process the
     * messages asynchronously.
     */
    pcmk__set_ipc_flags(flags, pcmk__client_name(client), crm_ipc_proxied);
    client->request_id = id;

    msg = pcmk__xe_create(NULL, PCMK__XE_LRMD_IPC_PROXY);
    pcmk__xe_set(msg, PCMK__XA_LRMD_IPC_OP, LRMD_IPC_OP_REQUEST);
    pcmk__xe_set(msg, PCMK__XA_LRMD_IPC_SESSION, client->id);
    pcmk__xe_set(msg, PCMK__XA_LRMD_IPC_CLIENT, pcmk__client_name(client));
    pcmk__xe_set(msg, PCMK__XA_LRMD_IPC_USER, client->user);
    pcmk__xe_set_int(msg, PCMK__XA_LRMD_IPC_MSG_ID, id);

    // @TODO Use different setter for uint32_t
    pcmk__xe_set_int(msg, PCMK__XA_LRMD_IPC_MSG_FLAGS, flags);

    wrapper = pcmk__xe_create(msg, PCMK__XE_LRMD_IPC_MSG);

    pcmk__xml_copy(wrapper, request);

    lrmd_server_send_notify(ipc_proxy, msg);

    pcmk__xml_free(request);
    pcmk__xml_free(msg);
    return 0;
}

/*!
 * \internal
 * \brief Notify a proxy provider that we wish to shut down
 *
 * \param[in,out] ipc_proxy  IPC client connection to proxy provider
 *
 * \return 0 on success, -1 on error
 */
int
ipc_proxy_shutdown_req(pcmk__client_t *ipc_proxy)
{
    xmlNode *msg = pcmk__xe_create(NULL, PCMK__XE_LRMD_IPC_PROXY);
    int rc;

    pcmk__xe_set(msg, PCMK__XA_LRMD_IPC_OP, LRMD_IPC_OP_SHUTDOWN_REQ);

    /* We don't really have a session, but the controller needs this attribute
     * to recognize this as proxy communication.
     */
    pcmk__xe_set(msg, PCMK__XA_LRMD_IPC_SESSION, "0");

    rc = (lrmd_server_send_notify(ipc_proxy, msg) != pcmk_rc_ok)? -1 : 0;
    pcmk__xml_free(msg);
    return rc;
}

/*!
 * \internal
 * \brief Clean up a closed client IPC connection
 *
 * \param[in] c  Connection that was closed
 *
 * \return 0 (i.e. do not re-run this callback)
 */
static int32_t
ipc_proxy_closed(qb_ipcs_connection_t *c)
{
    pcmk__client_t *client = pcmk__find_client(c);

    if (client == NULL) {
        pcmk__trace("Ignoring request to clean up unknown connection %p", c);
    } else {
        pcmk__client_t *ipc_proxy = pcmk__find_client_by_id(client->userdata);

        pcmk__trace("Cleaning up closed client connection %p", c);

        if (ipc_proxy != NULL) {
            xmlNode *msg = pcmk__xe_create(NULL, PCMK__XE_LRMD_IPC_PROXY);
            pcmk__xe_set(msg, PCMK__XA_LRMD_IPC_OP, LRMD_IPC_OP_DESTROY);
            pcmk__xe_set(msg, PCMK__XA_LRMD_IPC_SESSION, client->id);
            lrmd_server_send_notify(ipc_proxy, msg);
            pcmk__xml_free(msg);
        }

        g_hash_table_remove(ipc_clients, client->id);
        g_clear_pointer(&client->userdata, free);
        pcmk__free_client(client);
    }

    return 0;
}

/*!
 * \internal
 * \brief Destroy a client IPC connection
 *
 * \param[in] c  Connection to destroy
 *
 * \note We handle a destroyed connection the same as a closed one,
 *       but we need a separate handler because the return type is different.
 */
static void
ipc_proxy_destroy(qb_ipcs_connection_t *c)
{
    pcmk__trace("Destroying client connection %p", c);
    ipc_proxy_closed(c);
}

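// Handlers for the local IPC servers that stand in for each proxied daemon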
static struct qb_ipcs_service_handlers crmd_proxy_callbacks = {
    .connection_accept = crmd_proxy_accept,
    .connection_created = NULL,
    .msg_process = ipc_proxy_dispatch,
    .connection_closed = ipc_proxy_closed,
    .connection_destroyed = ipc_proxy_destroy
};

static struct qb_ipcs_service_handlers attrd_proxy_callbacks = {
    .connection_accept = attrd_proxy_accept,
    .connection_created = NULL,
    .msg_process = ipc_proxy_dispatch,
    .connection_closed = ipc_proxy_closed,
    .connection_destroyed = ipc_proxy_destroy
};

static struct qb_ipcs_service_handlers fencer_proxy_callbacks = {
    .connection_accept = fencer_proxy_accept,
    .connection_created = NULL,
    .msg_process = ipc_proxy_dispatch,
    .connection_closed = ipc_proxy_closed,
    .connection_destroyed = ipc_proxy_destroy
};

static struct qb_ipcs_service_handlers pacemakerd_proxy_callbacks = {
    .connection_accept = pacemakerd_proxy_accept,
    .connection_created = NULL,
    .msg_process = NULL,
    .connection_closed = NULL,
    .connection_destroyed = NULL
};

static struct qb_ipcs_service_handlers cib_proxy_callbacks_ro = {
    .connection_accept = cib_proxy_accept_ro,
    .connection_created = NULL,
    .msg_process = ipc_proxy_dispatch,
    .connection_closed = ipc_proxy_closed,
    .connection_destroyed = ipc_proxy_destroy
};

static struct qb_ipcs_service_handlers cib_proxy_callbacks_rw = {
    .connection_accept = cib_proxy_accept_rw,
    .connection_created = NULL,
    .msg_process = ipc_proxy_dispatch,
    .connection_closed = ipc_proxy_closed,
    .connection_destroyed = ipc_proxy_destroy
};

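/*!
 * \internal
 * \brief Add an IPC provider (newly connected cluster node controller)
 *
 * \param[in,out] ipc_proxy  Client connection to add as a provider
 */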
void
ipc_proxy_add_provider(pcmk__client_t *ipc_proxy)
{
    // Prepending ensures the most recent connection is always first
    ipc_providers = g_list_prepend(ipc_providers, ipc_proxy);
}

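/*!
 * \internal
 * \brief Remove an IPC provider, disconnecting any clients bound to it
 *
 * \param[in,out] ipc_proxy  Client connection to remove as a provider
 */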
void
ipc_proxy_remove_provider(pcmk__client_t *ipc_proxy)
{
    GHashTableIter iter;
    pcmk__client_t *ipc_client = NULL;
    char *key = NULL;
    GList *remove_these = NULL;
    GList *gIter = NULL;

    ipc_providers = g_list_remove(ipc_providers, ipc_proxy);

    g_hash_table_iter_init(&iter, ipc_clients);
    while (g_hash_table_iter_next(&iter, (gpointer *) &key,
                                  (gpointer *) &ipc_client)) {
        const char *proxy_id = ipc_client->userdata;

        if (pcmk__str_eq(proxy_id, ipc_proxy->id, pcmk__str_casei)) {
            pcmk__info("IPC proxy connection for client %s pid %d destroyed "
                       "because cluster node disconnected",
                       ipc_client->id, ipc_client->pid);
            /* We can't remove during the iteration, so copy items to a list
             * we can destroy later
             */
            remove_these = g_list_append(remove_these, ipc_client);
        }
    }

    for (gIter = remove_these; gIter != NULL; gIter = gIter->next) {
        ipc_client = gIter->data;

        // Disconnection callback will free the client here
        qb_ipcs_disconnect(ipc_client->ipcs);
    }

    /* Just frees the list, not the elements in the list */
    g_list_free(remove_these);
}

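/*!
 * \internal
 * \brief Start serving IPC on behalf of the daemons being proxied
 *
 * \note This exits fatally if the controller IPC endpoint cannot be created,
 *       which likely means a full cluster stack is already running locally.
 */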
void
ipc_proxy_init(void)
{
    ipc_clients = pcmk__strkey_table(NULL, NULL);

    pcmk__serve_based_ipc(&cib_ro, &cib_rw, &cib_shm, &cib_proxy_callbacks_ro,
                          &cib_proxy_callbacks_rw);
    pcmk__serve_attrd_ipc(&attrd_ipcs, &attrd_proxy_callbacks);
    pcmk__serve_fenced_ipc(&fencer_ipcs, &fencer_proxy_callbacks);
    pcmk__serve_pacemakerd_ipc(&pacemakerd_ipcs, &pacemakerd_proxy_callbacks);
    crmd_ipcs = pcmk__serve_controld_ipc(&crmd_proxy_callbacks);
    if (crmd_ipcs == NULL) {
        pcmk__err("Failed to create controller IPC server: exiting and "
                  "inhibiting respawn");
        pcmk__warn("Verify pacemaker and pacemaker_remote are not both "
                   "enabled");
        crm_exit(CRM_EX_FATAL);
    }
}

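/*!
 * \internal
 * \brief Stop serving proxied IPC, and free all provider and client state
 */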
void
ipc_proxy_cleanup(void)
{
    g_clear_pointer(&ipc_providers, g_list_free);
    g_clear_pointer(&ipc_clients, g_hash_table_destroy);

    pcmk__stop_based_ipc(cib_ro, cib_rw, cib_shm);

    g_clear_pointer(&attrd_ipcs, qb_ipcs_destroy);
    g_clear_pointer(&fencer_ipcs, qb_ipcs_destroy);
    g_clear_pointer(&pacemakerd_ipcs, qb_ipcs_destroy);
    g_clear_pointer(&crmd_ipcs, qb_ipcs_destroy);

    cib_ro = NULL;
    cib_rw = NULL;
    cib_shm = NULL;
}