1    	# pylint: disable=too-many-lines
2    	import contextlib
3    	import datetime
4    	import json
5    	import math
6    	import os
7    	import subprocess
8    	import sys
9    	import tempfile
10   	import time
11   	import xml.dom.minidom
12   	from typing import Any, Callable, Iterable, Mapping, Optional, Union, cast
13   	from xml.parsers.expat import ExpatError
14   	
15   	import pcs.lib.pacemaker.live as lib_pacemaker
16   	from pcs import settings, utils
17   	from pcs.cli.common import parse_args
18   	from pcs.cli.common.errors import (
19   	    ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE,
20   	    CmdLineInputError,
21   	)
22   	from pcs.cli.common.parse_args import (
23   	    OUTPUT_FORMAT_VALUE_CMD,
24   	    OUTPUT_FORMAT_VALUE_JSON,
25   	    Argv,
26   	    InputModifiers,
27   	    KeyValueParser,
28   	)
29   	from pcs.cli.common.tools import print_to_stderr
30   	from pcs.cli.file import metadata as file_metadata
31   	from pcs.cli.reports import process_library_reports
32   	from pcs.cli.reports.messages import report_item_msg_from_dto
33   	from pcs.cli.reports.output import deprecation_warning, warn
34   	from pcs.common import file as pcs_file
35   	from pcs.common import file_type_codes, reports
36   	from pcs.common.auth import HostAuthData
37   	from pcs.common.corosync_conf import CorosyncConfDto, CorosyncNodeDto
38   	from pcs.common.file import RawFileError
39   	from pcs.common.host import Destination
40   	from pcs.common.interface import dto
41   	from pcs.common.node_communicator import HostNotFound, Request, RequestData
42   	from pcs.common.str_tools import format_list, indent, join_multilines
43   	from pcs.common.tools import format_os_error
44   	from pcs.common.types import StringCollection, StringIterable
45   	from pcs.lib.commands.remote_node import _destroy_pcmk_remote_env
46   	from pcs.lib.communication.nodes import CheckAuth
47   	from pcs.lib.communication.pcs_cfgsync import SetConfigs
48   	from pcs.lib.communication.tools import RunRemotelyBase, run_and_raise
49   	from pcs.lib.communication.tools import run as run_com_cmd
50   	from pcs.lib.corosync import qdevice_net
51   	from pcs.lib.corosync.live import QuorumStatusException, QuorumStatusFacade
52   	from pcs.lib.errors import LibraryError
53   	from pcs.lib.file.instance import FileInstance
54   	from pcs.lib.file.raw_file import raw_file_error_report
55   	from pcs.lib.node import get_existing_nodes_names
56   	from pcs.lib.pcs_cfgsync.const import SYNCED_CONFIGS
57   	from pcs.utils import parallel_for_nodes
58   	
59   	
def _corosync_conf_local_cmd_call(
    corosync_conf_path: parse_args.ModifierValueType,
    lib_cmd: Callable[[bytes], bytes],
) -> None:
    """
    Run a library command that transforms a corosync.conf file in place.

    corosync_conf_path -- path of the corosync.conf file to read and rewrite
    lib_cmd -- library command to call; it takes the corosync.conf file
        content as its first argument and returns the new content

    Raises CmdLineInputError when the file cannot be read or written.
    """
    conf_file = pcs_file.RawFile(
        file_metadata.for_file_type(
            file_type_codes.COROSYNC_CONF, corosync_conf_path
        )
    )
    try:
        original_content = conf_file.read()
        modified_content = lib_cmd(original_content)
        conf_file.write(modified_content, can_overwrite=True)
    except pcs_file.RawFileError as e:
        # Translate the low-level file error into a CLI error with a
        # human-readable report message
        raise CmdLineInputError(
            reports.messages.FileIoError(
                e.metadata.file_type_code,
                e.action,
                e.reason,
                file_path=e.metadata.path,
            ).message
        ) from e
95   	
96   	
def cluster_cib_upgrade_cmd(
    lib: Any, argv: Argv, modifiers: InputModifiers
) -> None:
    """
    Upgrade the CIB schema to the latest version.

    Options:
      * -f - CIB file
    """
    del lib
    modifiers.ensure_only_supported("-f")
    # this command takes no positional arguments
    if argv:
        raise CmdLineInputError()
    utils.cluster_upgrade()
109  	
110  	
def cluster_disable_cmd(
    lib: Any, argv: Argv, modifiers: InputModifiers
) -> None:
    """
    Disable cluster services on the given nodes, all nodes, or locally.

    Options:
      * --all - disable all cluster nodes
      * --request-timeout - timeout for HTTP requests - effective only when at
        least one node has been specified or --all has been used
    """
    del lib
    modifiers.ensure_only_supported("--all", "--request-timeout")
    if not modifiers.get("--all"):
        disable_cluster(argv)
        return
    # --all cannot be combined with an explicit node list
    if argv:
        utils.err(ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE)
    disable_cluster_all()
128  	
129  	
def cluster_enable_cmd(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Enable cluster services on the given nodes, all nodes, or locally.

    Options:
      * --all - enable all cluster nodes
      * --request-timeout - timeout for HTTP requests - effective only when at
        least one node has been specified or --all has been used
    """
    del lib
    modifiers.ensure_only_supported("--all", "--request-timeout")
    if not modifiers.get("--all"):
        enable_cluster(argv)
        return
    # --all cannot be combined with an explicit node list
    if argv:
        utils.err(ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE)
    enable_cluster_all()
145  	
146  	
def cluster_stop_cmd(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Stop cluster services on the given nodes, all nodes, or locally.

    Options:
      * --wait
      * --force - no error when possible quorum loss
      * --request-timeout - timeout for HTTP requests - effective only when at
        least one node has been specified
      * --pacemaker - stop pacemaker, only effective when no node has been
        specified
      * --corosync - stop corosync, only effective when no node has been
        specified
      * --all - stop all cluster nodes
    """
    # NOTE: the docstring previously omitted --wait even though it is listed
    # in ensure_only_supported below.
    del lib
    modifiers.ensure_only_supported(
        "--wait",
        "--request-timeout",
        "--pacemaker",
        "--corosync",
        "--all",
        "--force",
    )
    if modifiers.get("--all"):
        # --all cannot be combined with an explicit node list
        if argv:
            utils.err(ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE)
        stop_cluster_all()
    else:
        stop_cluster(argv)
174  	
175  	
def cluster_start_cmd(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Start cluster services on the given nodes, all nodes, or locally.

    Options:
      * --wait
      * --request-timeout - timeout for HTTP requests, have effect only if at
        least one node have been specified
      * --all - start all cluster nodes
      * --corosync_conf
    """
    # NOTE: the docstring previously omitted --corosync_conf even though it
    # is listed in ensure_only_supported below.
    del lib
    modifiers.ensure_only_supported(
        "--wait", "--request-timeout", "--all", "--corosync_conf"
    )
    if modifiers.get("--all"):
        # --all cannot be combined with an explicit node list
        if argv:
            utils.err(ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE)
        start_cluster_all()
    else:
        start_cluster(argv)
194  	
195  	
def authkey_corosync(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Change the corosync authkey, either to the content of a given file or to
    a newly generated one (when no file is given).

    Options:
      * --force - skip check for authkey length
      * --request-timeout - timeout for HTTP requests
      * --skip-offline - skip unreachable nodes
    """
    modifiers.ensure_only_supported(
        "--force", "--skip-offline", "--request-timeout"
    )
    if len(argv) > 1:
        raise CmdLineInputError()

    # map CLI options to the force flags the library understands
    option_to_flag = [
        ("--force", reports.codes.FORCE),
        ("--skip-offline", reports.codes.SKIP_OFFLINE_NODES),
    ]
    force_flags = [
        flag for option, flag in option_to_flag if modifiers.get(option)
    ]

    new_authkey = None
    if argv:
        key_path = argv[0]
        try:
            with open(key_path, "rb") as key_file:
                new_authkey = key_file.read()
        except OSError as e:
            utils.err(f"Unable to read file '{key_path}': {format_os_error(e)}")

    lib.cluster.corosync_authkey_change(
        corosync_authkey=new_authkey,
        force_flags=force_flags,
    )
224  	
225  	
def sync_nodes(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Push the local corosync.conf to every node defined in it.

    Options:
      * --request-timeout - timeout for HTTP requests
    """
    del lib
    modifiers.ensure_only_supported("--request-timeout")
    if argv:
        raise CmdLineInputError()

    conf_text = utils.getCorosyncConf()
    node_list, report_list = get_existing_nodes_names(
        utils.get_corosync_conf_facade(conf_text=conf_text)
    )
    if not node_list:
        report_list.append(
            reports.ReportItem.error(
                reports.messages.CorosyncConfigNoNodesDefined()
            )
        )
    if report_list:
        process_library_reports(report_list)

    # distribute the very same config text we just read locally
    for node_name in node_list:
        utils.setCorosyncConfig(node_name, conf_text)

    warn(
        "Corosync configuration has been synchronized, please reload corosync "
        "daemon using 'pcs cluster reload corosync' command."
    )
256  	
257  	
def start_cluster(argv: Argv) -> None:
    """
    Start cluster services on the given nodes, or locally when no nodes are
    specified.

    Commandline options:
      * --wait
      * --request-timeout - timeout for HTTP requests, have effect only if at
        least one node have been specified
    """
    wait_requested = "--wait" in utils.pcs_options
    wait_timeout = (
        utils.validate_wait_get_timeout(False) if wait_requested else None
    )

    if argv:
        unique_nodes = set(argv)  # deduplicate the node list
        start_cluster_nodes(unique_nodes)
        if wait_requested:
            wait_for_nodes_started(unique_nodes, wait_timeout)
        return

    # local start
    if not utils.hasCorosyncConf():
        utils.err("cluster is not currently configured on this node")

    print_to_stderr("Starting Cluster...")
    services = ["corosync"]
    if utils.need_to_handle_qdevice_service():
        services.append("corosync-qdevice")
    services.append("pacemaker")
    for service_name in services:
        utils.start_service(service_name)
    if wait_requested:
        wait_for_nodes_started([], wait_timeout)
290  	
291  	
def start_cluster_all() -> None:
    """
    Start cluster services on all nodes defined in corosync.conf.

    Commandline options:
      * --wait
      * --request-timeout - timeout for HTTP requests
    """
    wait_requested = "--wait" in utils.pcs_options
    wait_timeout = (
        utils.validate_wait_get_timeout(False) if wait_requested else None
    )

    node_names, reports_to_process = get_existing_nodes_names(
        utils.get_corosync_conf_facade()
    )
    if not node_names:
        reports_to_process.append(
            reports.ReportItem.error(
                reports.messages.CorosyncConfigNoNodesDefined()
            )
        )
    if reports_to_process:
        process_library_reports(reports_to_process)

    start_cluster_nodes(node_names)
    if wait_requested:
        wait_for_nodes_started(node_names, wait_timeout)
319  	
320  	
def start_cluster_nodes(nodes: StringCollection) -> None:
    """
    Start cluster services on the given nodes in parallel.

    Commandline options:
      * --request-timeout - timeout for HTTP requests
    """
    # Large clusters take longer to start, so scale the timeout: one base
    # timeout per (started) group of 8 nodes:
    #  1 -  8 nodes: 1 * timeout
    #  9 - 16 nodes: 2 * timeout
    # 17 - 24 nodes: 3 * timeout
    # and so on
    # Users can override this and set their own timeout by specifying
    # the --request-timeout option (see utils.sendHTTPRequest).
    node_count = len(nodes)
    timeout = int(
        settings.default_request_timeout * math.ceil(node_count / 8.0)
    )
    utils.read_known_hosts_file()  # cache known hosts
    failures = parallel_for_nodes(
        utils.startCluster, nodes, quiet=True, timeout=timeout
    )
    if failures:
        utils.err("unable to start all nodes\n" + "\n".join(failures.values()))
345  	
346  	
def is_node_fully_started(node_status: Mapping[str, Any]) -> bool:
    """
    Tell whether a node, described by its status mapping, is fully started.

    A node counts as started when its status reports it "online" and not
    "pending"; a missing key means "not started".

    node_status -- node status data with "online" and "pending" keys

    Commandline options: no options
    """
    # The original and-chain could return a raw (non-bool) field value, e.g.
    # a falsy "online" value, despite the declared bool return type; wrap in
    # bool() to always honor the annotation. Missing "pending" defaults to
    # True so the absence of the key yields False, as before.
    return bool(
        node_status.get("online") and not node_status.get("pending", True)
    )
357  	
358  	
def wait_for_local_node_started(
    stop_at: datetime.datetime, interval: float
) -> tuple[int, str]:
    """
    Poll the local node until it is fully started or a deadline passes.

    stop_at -- absolute deadline; polling stops once it is in the past
    interval -- seconds to sleep between status checks

    Returns a (status_code, message) pair: (0, "Started") on success,
    (1, <reason>) on timeout or when the node status cannot be obtained.

    Commandline options: no options
    """
    try:
        while True:
            # sleep first to give the node a moment before the initial query
            time.sleep(interval)
            node_status = lib_pacemaker.get_local_node_status(
                utils.cmd_runner()
            )
            if is_node_fully_started(node_status):
                return 0, "Started"
            if datetime.datetime.now() > stop_at:
                return 1, "Waiting timeout"
    except LibraryError as e:
        # e.args carries report item DTOs; join their human-readable
        # messages into a single error string
        return (
            1,
            "Unable to get node status: {0}".format(
                "\n".join(
                    report_item_msg_from_dto(
                        cast(reports.ReportItemDto, item).message
                    ).message
                    for item in e.args
                )
            ),
        )
387  	
388  	
def wait_for_remote_node_started(
    node: str, stop_at: datetime.datetime, interval: float
) -> tuple[int, str]:
    """
    Poll a remote node over HTTP until it is fully started or a deadline
    passes.

    node -- name of the node to poll
    stop_at -- absolute deadline; polling stops once it is in the past
    interval -- seconds to sleep between status checks

    Returns a (status_code, message) pair: (0, "Started") on success,
    (1, <reason>) on a permanent error or timeout.

    Commandline options:
      * --request-timeout - timeout for HTTP requests
    """
    while True:
        # sleep first to give the node a moment before the initial query
        time.sleep(interval)
        code, output = utils.getPacemakerNodeStatus(node)
        # HTTP error, permission denied or unable to auth
        # there is no point in trying again as it won't get magically fixed
        if code in [1, 3, 4]:
            return 1, output
        if code == 0:
            try:
                # the node reports its status as JSON
                node_status = json.loads(output)
                if is_node_fully_started(node_status):
                    return 0, "Started"
            except (ValueError, KeyError):
                # this won't get fixed either
                return 1, "Unable to get node status"
        if datetime.datetime.now() > stop_at:
            return 1, "Waiting timeout"
413  	
414  	
def wait_for_nodes_started(
    node_list: StringIterable, timeout: Optional[int] = None
) -> None:
    """
    Wait until the listed nodes (or the local node when node_list is empty)
    report they are fully started.

    node_list -- names of nodes to wait for; empty means the local node
    timeout -- seconds to wait before giving up; defaults to 15 minutes

    Commandline options:
      * --request-timeout - timeout for HTTP request, effective only if
        node_list is not empty list
    """
    effective_timeout = 60 * 15 if timeout is None else timeout
    poll_interval = 2
    stop_at = datetime.datetime.now() + datetime.timedelta(
        seconds=effective_timeout
    )
    print_to_stderr("Waiting for node(s) to start...")
    if node_list:
        utils.read_known_hosts_file()  # cache known hosts
        node_errors = parallel_for_nodes(
            wait_for_remote_node_started, node_list, stop_at, poll_interval
        )
        if node_errors:
            utils.err("unable to verify all nodes have started")
    else:
        code, output = wait_for_local_node_started(stop_at, poll_interval)
        if code == 0:
            print_to_stderr(output)
        else:
            utils.err(output)
440  	
441  	
def stop_cluster_all() -> None:
    """
    Stop cluster services on all nodes defined in corosync.conf.

    Commandline options:
      * --force - no error when possible quorum loss
      * --request-timeout - timeout for HTTP requests
    """
    node_names, reports_to_process = get_existing_nodes_names(
        utils.get_corosync_conf_facade()
    )
    if not node_names:
        reports_to_process.append(
            reports.ReportItem.error(
                reports.messages.CorosyncConfigNoNodesDefined()
            )
        )
    if reports_to_process:
        process_library_reports(reports_to_process)

    stop_cluster_nodes(node_names)
461  	
462  	
def stop_cluster_nodes(nodes: StringCollection) -> None:  # noqa: PLR0912
    """
    Stop cluster services (pacemaker first, then corosync) on the given nodes.

    Unless --force is used, first verify via the quorum status of the cluster
    that stopping the nodes will not cause a loss of quorum, and refuse to
    proceed if it would.

    nodes -- names of the nodes to stop; all of them must exist in the
        corosync configuration

    Commandline options:
      * --force - no error when possible quorum loss
      * --request-timeout - timeout for HTTP requests
    """
    # pylint: disable=too-many-branches
    all_nodes, report_list = get_existing_nodes_names(
        utils.get_corosync_conf_facade()
    )
    unknown_nodes = set(nodes) - set(all_nodes)
    if unknown_nodes:
        if report_list:
            process_library_reports(report_list)
        utils.err(
            "nodes '%s' do not appear to exist in configuration"
            % "', '".join(sorted(unknown_nodes))
        )

    utils.read_known_hosts_file()  # cache known hosts
    # Stopping every node cannot leave a remaining partition without quorum,
    # so the quorum check below only applies to a proper subset of nodes.
    stopping_all = set(nodes) >= set(all_nodes)
    if "--force" not in utils.pcs_options and not stopping_all:
        error_list = []
        for node in nodes:
            retval, data = utils.get_remote_quorumtool_output(node)
            if retval != 0:
                error_list.append(node + ": " + data)
                continue
            try:
                quorum_status_facade = QuorumStatusFacade.from_string(data)
                if not quorum_status_facade.is_quorate:
                    # Get quorum status from a quorate node, non-quorate nodes
                    # may provide inaccurate info. If no node is quorate, there
                    # is no quorum to be lost and therefore no error to be
                    # reported.
                    continue
                if quorum_status_facade.stopping_nodes_cause_quorum_loss(nodes):
                    utils.err(
                        "Stopping the node(s) will cause a loss of the quorum"
                        + ", use --force to override"
                    )
                else:
                    # We have the info, no need to print errors
                    error_list = []
                    break
            except QuorumStatusException:
                if not utils.is_node_offline_by_quorumtool_output(data):
                    error_list.append(node + ": Unable to get quorum status")
                # else the node seems to be stopped already
        if error_list:
            utils.err(
                "Unable to determine whether stopping the nodes will cause "
                + "a loss of the quorum, use --force to override\n"
                + "\n".join(error_list)
            )

    was_error = False
    # Stop pacemaker on all requested nodes in parallel; nodes that failed
    # (e.g. unreachable) are excluded from the corosync stop below.
    node_errors = parallel_for_nodes(
        utils.repeat_if_timeout(utils.stopPacemaker), nodes, quiet=True
    )
    accessible_nodes = [node for node in nodes if node not in node_errors]
    if node_errors:
        utils.err(
            "unable to stop all nodes\n" + "\n".join(node_errors.values()),
            # keep going as long as at least one node was reachable
            exit_after_error=not accessible_nodes,
        )
        was_error = True

    for node in node_errors:
        print_to_stderr(
            "{0}: Not stopping cluster - node is unreachable".format(node)
        )

    node_errors = parallel_for_nodes(
        utils.stopCorosync, accessible_nodes, quiet=True
    )
    if node_errors:
        utils.err(
            "unable to stop all nodes\n" + "\n".join(node_errors.values())
        )
    if was_error:
        utils.err("unable to stop all nodes")
545  	
546  	
def enable_cluster(argv: Argv) -> None:
    """
    Enable cluster services on the given nodes, or locally when no nodes are
    specified.

    Commandline options:
      * --request-timeout - timeout for HTTP requests, effective only if at
        least one node has been specified
    """
    if not argv:
        # local-only path
        try:
            utils.enableServices()
        except LibraryError as e:
            process_library_reports(list(e.args))
        return
    enable_cluster_nodes(argv)
561  	
562  	
def disable_cluster(argv: Argv) -> None:
    """
    Disable cluster services on the given nodes, or locally when no nodes are
    specified.

    Commandline options:
      * --request-timeout - timeout for HTTP requests, effective only if at
        least one node has been specified
    """
    if not argv:
        # local-only path
        try:
            utils.disableServices()
        except LibraryError as e:
            process_library_reports(list(e.args))
        return
    disable_cluster_nodes(argv)
577  	
578  	
def enable_cluster_all() -> None:
    """
    Enable cluster services on all nodes defined in corosync.conf.

    Commandline options:
      * --request-timeout - timeout for HTTP requests
    """
    node_names, reports_to_process = get_existing_nodes_names(
        utils.get_corosync_conf_facade()
    )
    if not node_names:
        reports_to_process.append(
            reports.ReportItem.error(
                reports.messages.CorosyncConfigNoNodesDefined()
            )
        )
    if reports_to_process:
        process_library_reports(reports_to_process)

    enable_cluster_nodes(node_names)
597  	
598  	
def disable_cluster_all() -> None:
    """
    Disable cluster services on all nodes defined in corosync.conf.

    Commandline options:
      * --request-timeout - timeout for HTTP requests
    """
    node_names, reports_to_process = get_existing_nodes_names(
        utils.get_corosync_conf_facade()
    )
    if not node_names:
        reports_to_process.append(
            reports.ReportItem.error(
                reports.messages.CorosyncConfigNoNodesDefined()
            )
        )
    if reports_to_process:
        process_library_reports(reports_to_process)

    disable_cluster_nodes(node_names)
617  	
618  	
def enable_cluster_nodes(nodes: StringIterable) -> None:
    """
    Enable cluster services on each of the given nodes.

    Commandline options:
      * --request-timeout - timeout for HTTP requests
    """
    failures = utils.map_for_error_list(utils.enableCluster, nodes)
    if failures:
        utils.err("unable to enable all nodes\n" + "\n".join(failures))
627  	
628  	
def disable_cluster_nodes(nodes: StringIterable) -> None:
    """
    Disable cluster services on each of the given nodes.

    Commandline options:
      * --request-timeout - timeout for HTTP requests
    """
    failures = utils.map_for_error_list(utils.disableCluster, nodes)
    if failures:
        utils.err("unable to disable all nodes\n" + "\n".join(failures))
637  	
638  	
def destroy_cluster(argv: Argv) -> None:
    """
    Destroy the cluster on the given nodes; a no-op when no nodes are given.

    argv -- names of the nodes to destroy the cluster on

    Commandline options:
      * --request-timeout - timeout for HTTP requests
    """
    if not argv:
        return
    utils.read_known_hosts_file()  # cache known hosts
    nodes = argv
    # Stop pacemaker and resources while the cluster is still quorate.
    # Errors are deliberately ignored here (the result used to be assigned
    # to node_errors and immediately overwritten, never inspected): destroy
    # below stops any remaining cluster daemons regardless.
    parallel_for_nodes(
        utils.repeat_if_timeout(utils.stopPacemaker), nodes, quiet=True
    )
    node_errors = parallel_for_nodes(utils.destroyCluster, nodes, quiet=True)
    if node_errors:
        utils.err(
            "unable to destroy cluster\n" + "\n".join(node_errors.values())
        )
660  	
661  	
def stop_cluster(argv: Argv) -> None:
    """
    Stop cluster services on the given nodes, or locally when no nodes are
    specified.

    Commandline options:
      * --force - no error when possible quorum loss
      * --request-timeout - timeout for HTTP requests - effective only when at
        least one node has been specified
      * --pacemaker - stop pacemaker, only effective when no node has been
        specified
      * --corosync - stop corosync, only effective when no node has been
        specified
    """
    if argv:
        stop_cluster_nodes(argv)
        return

    # Local stop: unless forced, refuse to stop if doing so would cost the
    # cluster its quorum.
    if "--force" not in utils.pcs_options:
        # corosync 3.0.1 and older:
        # - retval is 0 on success if a node is not in a partition with quorum
        # - retval is 1 on error OR on success if a node has quorum
        # corosync 3.0.2 and newer:
        # - retval is 0 on success if a node has quorum
        # - retval is 1 on error
        # - retval is 2 on success if a node is not in a partition with quorum
        # => the return value is ambiguous across versions, so it is ignored
        # and the textual output is parsed instead
        output, dummy_retval = utils.run(["corosync-quorumtool", "-p", "-s"])
        try:
            if QuorumStatusFacade.from_string(
                output
            ).stopping_local_node_cause_quorum_loss():
                utils.err(
                    "Stopping the node will cause a loss of the quorum"
                    + ", use --force to override"
                )
        except QuorumStatusException:
            if not utils.is_node_offline_by_quorumtool_output(output):
                utils.err(
                    "Unable to determine whether stopping the node will cause "
                    + "a loss of the quorum, use --force to override"
                )
            # else the node seems to be stopped already, proceed to be sure

    # With neither --pacemaker nor --corosync given, stop both services;
    # otherwise stop only the requested one(s). Pacemaker goes first.
    stop_all = (
        "--pacemaker" not in utils.pcs_options
        and "--corosync" not in utils.pcs_options
    )
    if stop_all or "--pacemaker" in utils.pcs_options:
        stop_cluster_pacemaker()
    if stop_all or "--corosync" in utils.pcs_options:
        stop_cluster_corosync()
708  	
709  	
def stop_cluster_pacemaker() -> None:
    """
    Stop the pacemaker service on the local node.

    Commandline options: no options
    """
    print_to_stderr("Stopping Cluster (pacemaker)...")
    utils.stop_service("pacemaker")
716  	
717  	
def stop_cluster_corosync() -> None:
    """
    Stop corosync (preceded by corosync-qdevice when applicable) on the
    local node.

    Commandline options: no options
    """
    print_to_stderr("Stopping Cluster (corosync)...")
    services_to_stop = []
    if utils.need_to_handle_qdevice_service():
        services_to_stop.append("corosync-qdevice")
    services_to_stop.append("corosync")
    for service_name in services_to_stop:
        utils.stop_service(service_name)
729  	
730  	
def kill_cluster(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Forcibly kill all local cluster daemons (SIGKILL via killall).

    Options: no options
    """
    del lib
    if argv:
        raise CmdLineInputError()
    modifiers.ensure_only_supported()
    # The killall result is intentionally ignored. A block of commented-out
    # legacy code that reported an error on a non-zero exit code used to
    # follow this function; it has been removed as dead code.
    dummy_output, dummy_retval = kill_local_cluster_services()
746  	
747  	
def kill_local_cluster_services() -> tuple[str, int]:
    """
    Send SIGKILL to every known cluster daemon on the local node.

    Returns the (output, return value) pair of the killall run.

    Commandline options: no options
    """
    # Daemons taken from cluster-clean script in pacemaker
    pacemaker_daemons = [
        "pacemaker-attrd",
        "pacemaker-based",
        "pacemaker-controld",
        "pacemaker-execd",
        "pacemaker-fenced",
        "pacemaker-remoted",
        "pacemaker-schedulerd",
        "pacemakerd",
        "dlm_controld",
        "gfs_controld",
    ]
    # Corosync daemons
    corosync_daemons = [
        "corosync-qdevice",
        "corosync",
    ]
    return utils.run(
        [settings.killall_exec, "-9"] + pacemaker_daemons + corosync_daemons
    )
769  	
770  	
771  	def cluster_push(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:  # noqa: PLR0912, PLR0915
772  	    """
773  	    Options:
774  	      * --wait
775  	      * --config - push only configuration section of CIB
776  	      * -f - CIB file
777  	    """
778  	    # pylint: disable=too-many-branches
779  	    # pylint: disable=too-many-locals
780  	    # pylint: disable=too-many-statements
781  	
782  	    def get_details_from_crm_verify():
783  	        # get a new runner to run crm_verify command and pass the CIB filename
784  	        # into it so that the verify is run on the file instead on the live
785  	        # cluster CIB
786  	        verify_runner = utils.cmd_runner(cib_file_override=filename)
787  	        # Request verbose output, otherwise we may only get an unhelpful
788  	        # message:
789  	        # Configuration invalid (with errors) (-V may provide more detail)
790  	        # verify_returncode is always expected to be non-zero to indicate
791  	        # invalid CIB - ve run the verify because the CIB is invalid
792  	        (
793  	            verify_stdout,
794  	            verify_stderr,
795  	            verify_returncode,
796  	            verify_can_be_more_verbose,
797  	        ) = lib_pacemaker.verify(verify_runner, verbose=True)
798  	        return join_multilines([verify_stdout, verify_stderr])
799  	
800  	    del lib
801  	    modifiers.ensure_only_supported("--wait", "--config", "-f")
802  	    if len(argv) > 2:
803  	        raise CmdLineInputError()
804  	
805  	    filename = None
806  	    scope = None
807  	    timeout = None
808  	    diff_against = None
809  	
810  	    if modifiers.get("--wait"):
811  	        timeout = utils.validate_wait_get_timeout()
812  	    for arg in argv:
813  	        if "=" not in arg:
814  	            filename = arg
815  	        else:
816  	            arg_name, arg_value = arg.split("=", 1)
817  	            if arg_name == "scope":
818  	                if modifiers.get("--config"):
819  	                    utils.err("Cannot use both scope and --config")
820  	                if not utils.is_valid_cib_scope(arg_value):
821  	                    utils.err("invalid CIB scope '%s'" % arg_value)
822  	                else:
823  	                    scope = arg_value
824  	            elif arg_name == "diff-against":
825  	                diff_against = arg_value
826  	            else:
827  	                raise CmdLineInputError()
828  	    if modifiers.get("--config"):
829  	        scope = "configuration"
830  	    if diff_against and scope:
831  	        utils.err("Cannot use both scope and diff-against")
832  	    if not filename:
833  	        raise CmdLineInputError()
834  	
835  	    try:
        # NOTE(static analysis): xml.dom.minidom offers no protection against
        # maliciously constructed XML (e.g. entity-expansion attacks).
        # Consider parsing with defusedxml or a similarly hardened parser
        # when the input cannot be fully trusted.
836  	        new_cib_dom = xml.dom.minidom.parse(filename)
837  	        if scope and not new_cib_dom.getElementsByTagName(scope):
838  	            utils.err(
839  	                "unable to push cib, scope '%s' not present in new cib" % scope
840  	            )
841  	    except (OSError, ExpatError) as e:
842  	        utils.err("unable to parse new cib: %s" % e)
843  	
844  	    EXITCODE_INVALID_CIB = 78
845  	    runner = utils.cmd_runner()
846  	
847  	    if diff_against:
848  	        command = [
849  	            settings.crm_diff_exec,
850  	            "--original",
851  	            diff_against,
852  	            "--new",
853  	            filename,
854  	            "--no-version",
855  	        ]
856  	        patch, stderr, retval = runner.run(command)
857  	        #  0 (CRM_EX_OK) - success with no difference
858  	        #  1 (CRM_EX_ERROR) - success with difference
859  	        # 64 (CRM_EX_USAGE) - usage error
860  	        # 65 (CRM_EX_DATAERR) - XML fragments not parseable
861  	        if retval > 1:
862  	            utils.err("unable to diff the CIBs:\n" + stderr)
863  	        if retval == 0:
864  	            print_to_stderr(
865  	                "The new CIB is the same as the original CIB, nothing to push."
866  	            )
867  	            sys.exit(0)
868  	
869  	        command = [
870  	            settings.cibadmin_exec,
871  	            "--patch",
872  	            "--xml-pipe",
873  	        ]
874  	        output, stderr, retval = runner.run(command, patch)
875  	        if retval != 0:
876  	            push_output = stderr + output
877  	            verify_output = (
878  	                get_details_from_crm_verify()
879  	                if retval == EXITCODE_INVALID_CIB
880  	                else ""
881  	            )
882  	            error_text = (
883  	                f"{push_output}\n\n{verify_output}"
884  	                if verify_output.strip()
885  	                else push_output
886  	            )
887  	            utils.err("unable to push cib\n" + error_text)
888  	
889  	    else:
890  	        command = ["cibadmin", "--replace", "--xml-file", filename]
891  	        if scope:
892  	            command.append("--scope=%s" % scope)
893  	        output, retval = utils.run(command)
894  	        # 103 (CRM_EX_OLD) - update older than existing config
895  	        if retval == 103:
896  	            utils.err(
897  	                "Unable to push to the CIB because pushed configuration "
898  	                "is older than existing one. If you are sure you want to "
899  	                "push this configuration, try to use --config to replace only "
900  	                "configuration part instead of whole CIB. Otherwise get current"
901  	                " configuration by running command 'pcs cluster cib' and update"
902  	                " that."
903  	            )
904  	        elif retval != 0:
905  	            verify_output = (
906  	                get_details_from_crm_verify()
907  	                if retval == EXITCODE_INVALID_CIB
908  	                else ""
909  	            )
910  	            error_text = (
911  	                f"{output}\n\n{verify_output}"
912  	                if verify_output.strip()
913  	                else output
914  	            )
915  	            utils.err("unable to push cib\n" + error_text)
916  	
917  	    print_to_stderr("CIB updated")
918  	    try:
919  	        cib_errors = lib_pacemaker.get_cib_verification_errors(runner)
920  	        if cib_errors:
921  	            print_to_stderr("\n".join(cib_errors))
922  	    except lib_pacemaker.BadApiResultFormat as e:
923  	        print_to_stderr(
924  	            f"Unable to verify CIB: {e.original_exception}\n"
925  	            f"crm_verify output:\n{e.pacemaker_response}"
926  	        )
927  	
928  	    if not modifiers.is_specified("--wait"):
929  	        return
930  	    cmd = ["crm_resource", "--wait"]
931  	    if timeout:
932  	        cmd.extend(["--timeout", str(timeout)])
933  	    output, retval = utils.run(cmd)
934  	    if retval != 0:
935  	        msg = []
936  	        if retval == settings.pacemaker_wait_timeout_status:
937  	            msg.append("waiting timeout")
938  	        if output:
939  	            msg.append("\n" + output)
940  	        utils.err("\n".join(msg).strip())
941  	
942  	
def cluster_edit(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:  # noqa: PLR0912
    """
    Open the CIB (optionally a single scope of it) in $EDITOR and, when the
    user actually changed it, push the edited content back via cluster_push.

    Options:
      * --config - edit configuration section of CIB
      * -f - CIB file
      * --wait
    """
    # pylint: disable=too-many-branches
    modifiers.ensure_only_supported("--config", "--wait", "-f")
    if "EDITOR" not in os.environ:
        utils.err("$EDITOR environment variable is not set")
    else:
        if len(argv) > 1:
            raise CmdLineInputError()

        scope = None
        scope_arg = ""
        for option in argv:
            if "=" not in option:
                raise CmdLineInputError()
            opt_name, opt_value = option.split("=", 1)
            # only "scope=<value>" is accepted, and not together with --config
            if opt_name != "scope" or modifiers.get("--config"):
                raise CmdLineInputError()
            if utils.is_valid_cib_scope(opt_value):
                scope_arg = option
                scope = opt_value
            else:
                utils.err("invalid CIB scope '%s'" % opt_value)
        if modifiers.get("--config"):
            scope = "configuration"
            # Leave scope_arg empty as cluster_push will pick up a --config
            # option from utils.pcs_options
            scope_arg = ""

        editor = os.environ["EDITOR"]
        original_cib = utils.get_cib(scope)
        with tempfile.NamedTemporaryFile(mode="w+", suffix=".pcs") as tmp_file:
            tmp_file.write(original_cib)
            tmp_file.flush()
            try:
                subprocess.call([editor, tmp_file.name])
            except OSError:
                utils.err("unable to open file with $EDITOR: " + editor)

            tmp_file.seek(0)
            edited_cib = tmp_file.read()
            if edited_cib == original_cib:
                print_to_stderr("CIB not updated, no changes detected")
            else:
                push_argv = [tmp_file.name]
                if scope_arg:
                    push_argv.append(scope_arg)
                cluster_push(
                    lib,
                    push_argv,
                    modifiers.get_subset("--wait", "--config", "-f"),
                )
999  	
1000 	
def get_cib(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:  # noqa: PLR0912
    """
    Print the CIB (or a single scope of it) to stdout, or write it to a file
    when a filename argument is given.

    Options:
      * --config show configuration section of CIB
      * -f - CIB file
    """
    # pylint: disable=too-many-branches
    del lib
    modifiers.ensure_only_supported("--config", "-f")
    if len(argv) > 2:
        raise CmdLineInputError()

    filename = None
    scope = None
    for item in argv:
        if "=" not in item:
            filename = item
            continue
        key, value = item.split("=", 1)
        # only "scope=<value>" is accepted, and not together with --config
        if key != "scope" or modifiers.get("--config"):
            raise CmdLineInputError()
        if utils.is_valid_cib_scope(value):
            scope = value
        else:
            utils.err("invalid CIB scope '%s'" % value)
    if modifiers.get("--config"):
        scope = "configuration"

    if filename:
        cib_text = utils.get_cib(scope)
        if not cib_text:
            utils.err("No data in the CIB")
        try:
            with open(filename, "w") as cib_file:
                cib_file.write(cib_text)
        except OSError as e:
            utils.err(
                "Unable to write to file '%s', %s" % (filename, e.strerror)
            )
    else:
        print(utils.get_cib(scope).rstrip())
1043 	
1044 	
class RemoteAddNodes(RunRemotelyBase):
    """
    Ask a single remote pcsd to add nodes to its cluster.

    Sends the command data to /remote/cluster_add_nodes on the target node
    and relays reports produced remotely to the local report processor.
    """

    def __init__(self, report_processor, target, data):
        super().__init__(report_processor)
        self._target = target
        self._data = data
        self._success = False

    def get_initial_request_list(self):
        request_data = RequestData(
            "remote/cluster_add_nodes",
            [("data_json", json.dumps(self._data))],
        )
        return [Request(self._target, request_data)]

    def _process_response(self, response):
        node_label = response.request.target.label
        communication_report = self._get_response_report(response)
        if communication_report is not None:
            self._report(communication_report)
            return

        try:
            parsed = json.loads(response.data)
            for report_dict in parsed["report_list"]:
                severity = reports.ReportItemSeverity(
                    report_dict["severity"],
                    report_dict["forceable"],
                )
                message = reports.messages.LegacyCommonMessage(
                    report_dict["code"],
                    report_dict["info"],
                    report_dict["report_text"],
                )
                self._report(
                    reports.ReportItem(severity=severity, message=message)
                )
            status = parsed["status"]
            if status == "success":
                self._success = True
            elif status != "error":
                # NOTE(review): an "error" status is deliberately silent here
                # (presumably covered by the relayed report list), while any
                # other non-success status prints its message - confirm this
                # is the intended condition
                print_to_stderr("Error: {}".format(parsed["status_msg"]))

        except (KeyError, json.JSONDecodeError):
            # the remote answered with something we cannot interpret
            self._report(
                reports.ReportItem.warning(
                    reports.messages.InvalidResponseFormat(node_label)
                )
            )

    def on_complete(self):
        # True only when the remote node reported overall success
        return self._success
1100 	
1101 	
def node_add_outside_cluster(
    lib: Any, argv: Argv, modifiers: InputModifiers
) -> None:
    """
    Add a new node to an existing cluster from a host that is not a member
    of that cluster, by delegating the work to one of the cluster's nodes
    over pcsd.

    Options:
      * --wait - wait until new node will start up, effective only when --start
        is specified
      * --start - start new node
      * --enable - enable new node
      * --force - treat validation issues and not resolvable addresses as
        warnings instead of errors
      * --skip-offline - skip unreachable nodes
      * --no-watchdog-validation - do not validate watchdogs
      * --request-timeout - HTTP request timeout
    """
    del lib
    modifiers.ensure_only_supported(
        "--wait",
        "--start",
        "--enable",
        "--force",
        "--skip-offline",
        "--no-watchdog-validation",
        "--request-timeout",
    )
    if len(argv) < 2:
        raise CmdLineInputError(
            "Usage: pcs cluster node add-outside <cluster node> <node name> "
            "[addr=<node address>]... [watchdog=<watchdog path>] "
            "[device=<SBD device path>]... [--start [--wait[=<n>]]] [--enable] "
            "[--no-watchdog-validation]"
        )

    # the first argument names an existing cluster node to delegate to,
    # the remaining arguments describe the node being added
    cluster_node, *argv = argv
    node_dict = _parse_add_node(argv)

    force_flags = []
    if modifiers.get("--force"):
        force_flags.append(reports.codes.FORCE)
    if modifiers.get("--skip-offline"):
        force_flags.append(reports.codes.SKIP_OFFLINE_NODES)
    cmd_data = dict(
        nodes=[node_dict],
        wait=modifiers.get("--wait"),
        start=modifiers.get("--start"),
        enable=modifiers.get("--enable"),
        no_watchdog_validation=modifiers.get("--no-watchdog-validation"),
        force_flags=force_flags,
    )

    # resolve the delegate node to a communication target; any resolution
    # problems are reported and abort the command
    lib_env = utils.get_lib_env()
    report_processor = lib_env.report_processor
    target_factory = lib_env.get_node_target_factory()
    report_list, target_list = target_factory.get_target_list_with_reports(
        [cluster_node],
        skip_non_existing=False,
        allow_skip=False,
    )
    report_processor.report_list(report_list)
    if report_processor.has_errors:
        raise LibraryError()

    # run the command on the delegate node; RemoteAddNodes relays reports
    # produced remotely back to the local report processor
    com_cmd = RemoteAddNodes(report_processor, target_list[0], cmd_data)
    was_successful = run_com_cmd(lib_env.get_node_communicator(), com_cmd)

    if not was_successful:
        raise LibraryError()
1169 	
1170 	
def node_remove(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Remove the listed nodes from the cluster.

    Options:
      * --force - continue even though the action may cause quorum loss
      * --skip-offline - skip unreachable nodes
      * --request-timeout - HTTP request timeout
    """
    modifiers.ensure_only_supported(
        "--force",
        "--skip-offline",
        "--request-timeout",
    )
    if not argv:
        raise CmdLineInputError()

    # translate CLI switches to library force flags
    force_flags = []
    for option, code in (
        ("--force", reports.codes.FORCE),
        ("--skip-offline", reports.codes.SKIP_OFFLINE_NODES),
    ):
        if modifiers.get(option):
            force_flags.append(code)

    lib.cluster.remove_nodes(argv, force_flags=force_flags)
1193 	
1194 	
def cluster_uidgid(  # noqa: PLR0912
    lib: Any, argv: Argv, modifiers: InputModifiers, silent_list: bool = False
) -> None:
    """
    List, add or remove corosync uidgid configuration files.

    With no arguments, print the configured UID/GID pairs (suppressing the
    "nothing configured" message when silent_list is True). With "add",
    "delete" or "remove" followed by uid=<uid> and/or gid=<gid> arguments,
    create or remove the matching uidgid file.

    Options: no options
    """
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-locals
    del lib
    modifiers.ensure_only_supported()
    if not argv:
        # list mode: show every uidgid file that defines a uid or a gid
        uid_gid_lines: list[str] = []
        for ug_file in os.listdir(settings.corosync_uidgid_dir):
            uid_gid_dict = utils.read_uid_gid_file(ug_file)
            if "uid" in uid_gid_dict or "gid" in uid_gid_dict:
                # missing uid/gid renders as an empty value, matching the
                # historical output format
                uid_gid_lines.append(
                    "UID/GID: uid={} gid={}".format(
                        uid_gid_dict.get("uid", ""),
                        uid_gid_dict.get("gid", ""),
                    )
                )
        if uid_gid_lines:
            print("\n".join(sorted(uid_gid_lines)))
        elif not silent_list:
            print_to_stderr("No uidgids configured")
        return

    command = argv.pop(0)
    uid = ""
    gid = ""

    # only add/delete/remove with at least one uid=/gid= argument is valid
    if command not in {"add", "delete", "remove"} or not argv:
        raise CmdLineInputError()

    for arg in argv:
        if "=" not in arg:
            utils.err(
                "uidgid options must be of the form uid=<uid> gid=<gid>"
            )
        key, value = arg.split("=", 1)
        if key not in {"uid", "gid"}:
            utils.err(
                "%s is not a valid key, you must use uid or gid" % key
            )
        if key == "uid":
            uid = value
        elif key == "gid":
            gid = value
    if uid == "" and gid == "":
        utils.err("you must set either uid or gid")

    if command == "add":
        utils.write_uid_gid_file(uid, gid)
    else:
        # command is "delete" or "remove"
        file_removed = utils.remove_uid_gid_file(uid, gid)
        if not file_removed:
            utils.err(
                "no uidgid files with uid=%s and gid=%s found" % (uid, gid)
            )
1259 	
1260 	
def cluster_get_corosync_conf(
    lib: Any, argv: Argv, modifiers: InputModifiers
) -> None:
    """
    Print corosync.conf - the local one, or the one fetched from the node
    given as an argument.

    Options:
      * --request-timeout - timeout for HTTP requests, effective only when at
        least one node has been specified
    """
    del lib
    modifiers.ensure_only_supported("--request-timeout")
    if len(argv) > 1:
        raise CmdLineInputError()

    if argv:
        node = argv[0]
        retval, output = utils.getCorosyncConfig(node)
        if retval == 0:
            print(output.rstrip())
        else:
            utils.err(output)
    else:
        print(utils.getCorosyncConf().rstrip())
1285 	
def cluster_reload(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Tell corosync to reload its configuration.

    Options: no options
    """
    del lib
    modifiers.ensure_only_supported()
    if argv != ["corosync"]:
        raise CmdLineInputError()

    output, retval = utils.reloadCorosync()
    reload_failed = retval != 0 or "invalid option" in output
    if reload_failed:
        utils.err(output.rstrip())
    print_to_stderr("Corosync reloaded")
1299 	
1300 	
1301 	# Completely tear down the cluster & remove config files
1302 	# Code taken from cluster-clean script in pacemaker
def cluster_destroy(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:  # noqa: PLR0912
    """
    Stop cluster services and permanently remove cluster state and
    configuration, on the local node or (with --all) on all cluster nodes.

    Options:
      * --all - destroy cluster on all cluster nodes => destroy whole cluster
      * --force - required for destroying the cluster - DEPRECATED
      * --request-timeout - timeout of HTTP requests, effective only with --all
      * --yes - required for destroying the cluster
    """
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-statements
    del lib
    modifiers.ensure_only_supported(
        "--all", "--force", "--request-timeout", "--yes"
    )
    if argv:
        raise CmdLineInputError()
    # NOTE(review): the warning and the confirmation prompt run only for
    # interactive sessions; non-interactive runs proceed without either -
    # confirm this is intended
    if utils.is_run_interactive():
        warn(
            "It is recommended to run 'pcs cluster stop' before "
            "destroying the cluster."
        )
        if not utils.get_continue_confirmation(
            "This would kill all cluster processes and then PERMANENTLY remove "
            "cluster state and configuration",
            bool(modifiers.get("--yes")),
            bool(modifiers.get("--force")),
        ):
            return
    if modifiers.get("--all"):
        # load data
        cib = None
        lib_env = utils.get_lib_env()
        try:
            cib = lib_env.get_cib()
        except LibraryError:
            # destroying can proceed without the CIB; only remote/guest node
            # deconfiguration is skipped in that case
            warn(
                "Unable to load CIB to get guest and remote nodes from it, "
                "those nodes will not be deconfigured."
            )
        corosync_nodes, report_list = get_existing_nodes_names(
            utils.get_corosync_conf_facade()
        )
        if not corosync_nodes:
            report_list.append(
                reports.ReportItem.error(
                    reports.messages.CorosyncConfigNoNodesDefined()
                )
            )
        if report_list:
            process_library_reports(report_list)

        # destroy remote and guest nodes
        if cib is not None:
            try:
                all_remote_nodes, report_list = get_existing_nodes_names(
                    cib=cib
                )
                if report_list:
                    process_library_reports(report_list)
                if all_remote_nodes:
                    # best effort: unreachable or failing nodes do not abort
                    # the destroy
                    _destroy_pcmk_remote_env(
                        lib_env,
                        all_remote_nodes,
                        skip_offline_nodes=True,
                        allow_fails=True,
                    )
            except LibraryError as e:
                process_library_reports(list(e.args))

        # destroy full-stack nodes
        destroy_cluster(corosync_nodes)
    else:
        # local-only destroy: stop services in the order pacemaker,
        # corosync-qdevice, corosync
        print_to_stderr("Shutting down pacemaker/corosync services...")
        for service in ["pacemaker", "corosync-qdevice", "corosync"]:
            # It is safe to ignore error since we want it not to be running
            # anyways.
            with contextlib.suppress(LibraryError):
                utils.stop_service(service)
        print_to_stderr("Killing any remaining services...")
        kill_local_cluster_services()
        # previously errors were suppressed in here, let's keep it that way
        # for now
        with contextlib.suppress(Exception):
            utils.disableServices()

        # it's not a big deal if sbd disable fails
        with contextlib.suppress(Exception):
            service_manager = utils.get_service_manager()
            service_manager.disable(settings.sbd_service_name)

        # remove the main configuration files in one rm call
        print_to_stderr("Removing all cluster configuration files...")
        dummy_output, dummy_retval = utils.run(
            [
                settings.rm_exec,
                "-f",
                settings.corosync_conf_file,
                settings.corosync_authkey_file,
                settings.pacemaker_authkey_file,
                settings.pcsd_dr_config_location,
            ]
        )
        # glob patterns of pacemaker state files to delete from the local
        # state directory
        state_files = [
            "cib-*",
            "cib.*",
            "cib.xml*",
            "core.*",
            "cts.*",
            "hostcache",
            "pe*.bz2",
        ]
        for name in state_files:
            dummy_output, dummy_retval = utils.run(
                [
                    settings.find_exec,
                    settings.pacemaker_local_state_dir,
                    "-name",
                    name,
                    "-exec",
                    settings.rm_exec,
                    "-f",
                    "{}",
                    ";",
                ]
            )
        # errors from deleting other files are suppressed as well we do not
        # want to fail if qdevice was not set up
        with contextlib.suppress(Exception):
            qdevice_net.client_destroy()
1431 	
1432 	
def cluster_verify(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Check the cluster configuration for errors via the library command.

    Options:
      * -f - CIB file
      * --full - more verbose output
    """
    modifiers.ensure_only_supported("-f", "--full")
    if argv:
        raise CmdLineInputError()
    verbose_requested = modifiers.get("--full")
    lib.cluster.verify(verbose=verbose_requested)
1444 	
1445 	
def cluster_report(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:  # noqa: PLR0912
    """
    Create a tarball of cluster diagnostic data by running crm_report and
    filtering its noisy output.

    Options:
      * --force - allow overwriting existing files - DEPRECATED
      * --from - timestamp
      * --to - timestamp
      * --overwrite - allow overwriting existing files
        The resulting file should be stored on the machine where pcs cli is
        running, not on the machine where pcs daemon is running. Therefore we
        want to use --overwrite and not --force.
    """

    # pylint: disable=too-many-branches
    del lib
    modifiers.ensure_only_supported("--force", "--from", "--overwrite", "--to")
    if len(argv) != 1:
        raise CmdLineInputError()

    outfile = argv[0]
    dest_outfile = outfile + ".tar.bz2"
    if os.path.exists(dest_outfile):
        overwrite_allowed = modifiers.get("--overwrite") or modifiers.get(
            "--force"
        )
        if not overwrite_allowed:
            utils.err(
                dest_outfile + " already exists, use --overwrite to overwrite"
            )
            return
        if modifiers.get("--force"):
            # deprecated in the first pcs-0.12 version, replaced by --overwrite
            deprecation_warning(
                "Using --force to confirm this action is deprecated and might "
                "be removed in a future release, use --overwrite instead"
            )
        try:
            os.remove(dest_outfile)
        except OSError as e:
            utils.err(f"Unable to remove {dest_outfile}: {format_os_error(e)}")

    # assemble crm_report arguments: -f <from> [-t <to>] <outfile>
    crm_report_opts = ["-f"]
    if modifiers.is_specified("--from"):
        crm_report_opts.append(str(modifiers.get("--from")))
        if modifiers.is_specified("--to"):
            crm_report_opts.extend(["-t", str(modifiers.get("--to"))])
    else:
        # default time window: the last 24 hours
        yesterday = datetime.datetime.now() - datetime.timedelta(1)
        crm_report_opts.append(yesterday.strftime("%Y-%m-%d %H:%M"))
    crm_report_opts.append(outfile)

    output, retval = utils.run([settings.crm_report_exec] + crm_report_opts)
    if retval != 0 and (
        "ERROR: Cannot determine nodes; specify --nodes or --single-node"
        in output
    ):
        utils.err("cluster is not configured on this node")

    # drop crm_report chatter that is not useful to pcs users
    drop_substrings = (
        "We will attempt to remove",
        "-p option",
        "However, doing",
        "to diagnose",
    )
    kept_lines = []
    for line in output.split("\n"):
        if line.startswith(("cat:", "grep", "tail")):
            continue
        if any(fragment in line for fragment in drop_substrings):
            continue
        kept_lines.append(line.replace("--dest", "<dest>"))
    newoutput = "".join(part + "\n" for part in kept_lines)

    if retval != 0:
        utils.err(newoutput)
    print_to_stderr(newoutput)
1520 	
1521 	
1522 	# TODO this should be implemented in multiple lib commands, and the cli should
1523 	# only call these commands as needed
1524 	# - lib command for checking auth, that returns not authorized nodes
1525 	# - if any not authorized nodes
1526 	#   - the cli asks for a username and pass
1527 	#   - call lib command for authorizing hosts
1528 	# - else:
1529 	#   - call lib command to send the configs to other nodes
1530 	#
1531 	# This command itself is always run as root, see app.py (_non_root_run)
1532 	# So we do not need to deal with the configs in .pcs for non-root run
def cluster_auth_cmd(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:  # noqa: PLR0912
    """
    Authenticate the local cluster's nodes: check which nodes are not
    authorized, prompt for credentials and authenticate them; if all nodes
    are already authorized, push local synced config files to them instead.

    Options:
      * --corosync_conf - corosync.conf file
      * --request-timeout - timeout of HTTP requests
      * -u - username
      * -p - password
    """
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-locals
    modifiers.ensure_only_supported(
        "--corosync_conf", "--request-timeout", "-u", "-p"
    )
    if argv:
        raise CmdLineInputError()
    lib_env = utils.get_lib_env()
    target_factory = lib_env.get_node_target_factory()
    corosync_conf = lib_env.get_corosync_conf()
    cluster_node_list = corosync_conf.get_nodes()
    # nodes without a name in corosync.conf cannot be addressed; collect the
    # usable names and remember whether any node had to be skipped
    cluster_node_names = []
    missing_name = False
    for node in cluster_node_list:
        if node.name:
            cluster_node_names.append(node.name)
        else:
            missing_name = True
    if missing_name:
        warn(
            "Skipping nodes which do not have their name defined in "
            "corosync.conf, use the 'pcs host auth' command to authenticate "
            "them"
        )
    # a node not present in known-hosts (HostNotFound) is not authorized
    target_list = []
    not_authorized_node_name_list = []
    for node_name in cluster_node_names:
        try:
            target_list.append(target_factory.get_target(node_name))
        except HostNotFound:
            print_to_stderr("{}: Not authorized".format(node_name))
            not_authorized_node_name_list.append(node_name)
    # node names returned by CheckAuth are treated as not authorized as well
    com_cmd = CheckAuth(lib_env.report_processor)
    com_cmd.set_targets(target_list)
    not_authorized_node_name_list.extend(
        run_and_raise(lib_env.get_node_communicator(), com_cmd)
    )
    if not_authorized_node_name_list:
        print(
            "Nodes to authorize: {}".format(
                ", ".join(not_authorized_node_name_list)
            )
        )
        username, password = utils.get_user_and_pass()
        # map the not-authorized names back to corosync node objects; nodes
        # without addresses cannot be authenticated from here
        not_auth_node_list = []
        for node_name in not_authorized_node_name_list:
            for node in cluster_node_list:
                if node.name == node_name:
                    if node.addrs_plain():
                        not_auth_node_list.append(node)
                    else:
                        print_to_stderr(
                            f"{node.name}: No addresses defined in "
                            "corosync.conf, use the 'pcs host auth' command to "
                            "authenticate the node"
                        )
        # authenticate each remaining node via its first address on the
        # default pcsd port
        nodes_to_auth_data = {
            node.name: HostAuthData(
                username,
                password,
                [
                    Destination(
                        node.addrs_plain()[0], settings.pcsd_default_port
                    )
                ],
            )
            for node in not_auth_node_list
        }
        lib.auth.auth_hosts(nodes_to_auth_data)
    else:
        # TODO backwards compatibility
        # The command overwrites known-hosts and pcsd_settings.conf on all
        # cluster nodes with local version, only if all of the nodes are
        # already authorized. We should investigate what is the reason why
        # the command does this, and decide if we should drop/keep/change this
        configs = {}
        for file_type_code in SYNCED_CONFIGS:
            file_instance = FileInstance.for_common(file_type_code)
            if not file_instance.raw_file.exists():
                # it's not an error if the file does not exist locally, we just
                # wont send it
                continue
            try:
                configs[file_type_code] = file_instance.read_raw().decode(
                    "utf-8"
                )
            except RawFileError as e:
                # in case of error when reading some file, we still might be able
                # to read and send the others without issues
                lib_env.report_processor.report(
                    raw_file_error_report(e, is_forced_or_warning=True)
                )
        set_configs_cmd = SetConfigs(
            lib_env.report_processor,
            corosync_conf.get_cluster_name(),
            configs,
            force=True,
            rejection_severity=reports.ReportItemSeverity.error(),
        )
        set_configs_cmd.set_targets(target_list)
        run_and_raise(lib_env.get_node_communicator(), set_configs_cmd)
1642 	
1643 	
def _parse_node_options(
    node: str,
    options: Argv,
    additional_options: StringCollection = (),
    additional_repeatable_options: StringCollection = (),
) -> dict[str, Union[str, list[str]]]:
    """
    Parse name=value options of one node into a node definition dict.

    Commandline options: no options
    """
    ADDR_OPT_KEYWORD = "addr"  # pylint: disable=invalid-name
    allowed = {ADDR_OPT_KEYWORD} | set(additional_options)
    repeatable = {ADDR_OPT_KEYWORD} | set(additional_repeatable_options)
    parser = KeyValueParser(options, repeatable)
    unique_values = parser.get_unique()
    repeatable_values = parser.get_repeatable()
    unsupported = (
        set(unique_values.keys()) | set(repeatable_values)
    ) - allowed
    if unsupported:
        raise CmdLineInputError(
            f"Unknown options {format_list(unsupported)} for node '{node}'"
        )
    unique_values["name"] = node
    # the library layer expects the plural "addrs" key
    if ADDR_OPT_KEYWORD in repeatable_values:
        repeatable_values["addrs"] = repeatable_values.pop(ADDR_OPT_KEYWORD)
    return unique_values | repeatable_values
1671 	
1672 	
# keyword that opens the transport section on the "cluster setup" command line
TRANSPORT_KEYWORD = "transport"
# pseudo-section collecting transport options given before any sub-keyword
TRANSPORT_DEFAULT_SECTION = "__default__"
# keyword that opens one link options group within the transport section
LINK_KEYWORD = "link"
1676 	
1677 	
def _parse_transport(
    transport_args: Argv,
) -> tuple[str, dict[str, Union[dict[str, str], list[dict[str, str]]]]]:
    """
    Split transport args into the transport type and per-section option dicts.

    Commandline options: no options
    """
    if not transport_args:
        raise CmdLineInputError(
            f"{TRANSPORT_KEYWORD.capitalize()} type not defined"
        )
    transport_type = transport_args[0]

    section_keywords = {"compression", "crypto", LINK_KEYWORD}
    grouped = parse_args.group_by_keywords(
        transport_args[1:],
        section_keywords,
        implicit_first_keyword=TRANSPORT_DEFAULT_SECTION,
    )
    options: dict[str, Union[dict[str, str], list[dict[str, str]]]] = {}
    for section in section_keywords | {TRANSPORT_DEFAULT_SECTION}:
        # "link" is handled separately below as a list of option dicts
        if section == LINK_KEYWORD:
            continue
        options[section] = KeyValueParser(
            grouped.get_args_flat(section)
        ).get_unique()
    options[LINK_KEYWORD] = [
        KeyValueParser(link_group).get_unique()
        for link_group in grouped.get_args_groups(LINK_KEYWORD)
    ]

    return transport_type, options
1709 	
1710 	
def cluster_setup(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Set up a new cluster, either live on cluster nodes or into a local file.

    Options:
      * --wait - only effective when used with --start
      * --start - start cluster
      * --enable - enable cluster
      * --force - some validation issues and unresolvable addresses are treated
        as warnings
      * --no-keys-sync - do not create and distribute pcsd ssl cert and key,
        corosync and pacemaker authkeys
      * --no-cluster-uuid - do not generate a cluster UUID during setup
      * --corosync_conf - corosync.conf file path, do not talk to cluster nodes
      * --overwrite - allow overwriting existing files
    """
    # pylint: disable=too-many-locals
    # local mode: write corosync.conf to a file instead of contacting nodes
    is_local = modifiers.is_specified("--corosync_conf")

    allowed_options_common = ["--force", "--no-cluster-uuid"]
    # options meaningful only when talking to live cluster nodes
    allowed_options_live = [
        "--wait",
        "--start",
        "--enable",
        "--no-keys-sync",
    ]
    # options meaningful only when writing a local file
    allowed_options_local = ["--corosync_conf", "--overwrite"]
    modifiers.ensure_only_supported(
        *(
            allowed_options_common
            + allowed_options_live
            + allowed_options_local
        ),
    )
    # live-only and local-only options are mutually exclusive
    if is_local and modifiers.is_specified_any(allowed_options_live):
        raise CmdLineInputError(
            f"Cannot specify any of {format_list(allowed_options_live)} "
            "when '--corosync_conf' is specified"
        )
    if not is_local and modifiers.is_specified("--overwrite"):
        raise CmdLineInputError(
            "Cannot specify '--overwrite' when '--corosync_conf' is not "
            "specified"
        )

    # at least a cluster name and one node are required
    if len(argv) < 2:
        raise CmdLineInputError()
    cluster_name, *argv = argv
    keywords = [TRANSPORT_KEYWORD, "totem", "quorum"]
    parsed_args = parse_args.group_by_keywords(
        argv, keywords, implicit_first_keyword="nodes"
    )
    parsed_args.ensure_unique_keywords()
    # everything before the first keyword is the node list; each node gets
    # its own options parsed (addr=... etc.)
    nodes = [
        _parse_node_options(node, options)
        for node, options in parse_args.split_list_by_any_keywords(
            parsed_args.get_args_flat("nodes"), "node name"
        ).items()
    ]

    transport_type = None
    transport_options: dict[
        str, Union[dict[str, str], list[dict[str, str]]]
    ] = {}

    if parsed_args.has_keyword(TRANSPORT_KEYWORD):
        transport_type, transport_options = _parse_transport(
            parsed_args.get_args_flat(TRANSPORT_KEYWORD)
        )

    force_flags = []
    if modifiers.get("--force"):
        force_flags.append(reports.codes.FORCE)

    totem_options = KeyValueParser(
        parsed_args.get_args_flat("totem")
    ).get_unique()
    quorum_options = KeyValueParser(
        parsed_args.get_args_flat("quorum")
    ).get_unique()

    if not is_local:
        # live setup: the library contacts the nodes and distributes configs
        lib.cluster.setup(
            cluster_name,
            nodes,
            transport_type=transport_type,
            transport_options=transport_options.get(
                TRANSPORT_DEFAULT_SECTION, {}
            ),
            link_list=transport_options.get(LINK_KEYWORD, []),
            compression_options=transport_options.get("compression", {}),
            crypto_options=transport_options.get("crypto", {}),
            totem_options=totem_options,
            quorum_options=quorum_options,
            wait=modifiers.get("--wait"),
            start=modifiers.get("--start"),
            enable=modifiers.get("--enable"),
            no_keys_sync=modifiers.get("--no-keys-sync"),
            no_cluster_uuid=modifiers.is_specified("--no-cluster-uuid"),
            force_flags=force_flags,
        )
        return

    # local setup: generate corosync.conf content in memory ...
    corosync_conf_data = lib.cluster.setup_local(
        cluster_name,
        nodes,
        transport_type=transport_type,
        transport_options=transport_options.get(TRANSPORT_DEFAULT_SECTION, {}),
        link_list=transport_options.get(LINK_KEYWORD, []),
        compression_options=transport_options.get("compression", {}),
        crypto_options=transport_options.get("crypto", {}),
        totem_options=totem_options,
        quorum_options=quorum_options,
        no_cluster_uuid=modifiers.is_specified("--no-cluster-uuid"),
        force_flags=force_flags,
    )

    # ... and write it to the path given via --corosync_conf
    corosync_conf_file = pcs_file.RawFile(
        file_metadata.for_file_type(
            file_type_codes.COROSYNC_CONF, modifiers.get("--corosync_conf")
        )
    )
    overwrite = modifiers.is_specified("--overwrite")
    try:
        corosync_conf_file.write(corosync_conf_data, can_overwrite=overwrite)
    except pcs_file.FileAlreadyExists as e:
        utils.err(
            reports.messages.FileAlreadyExists(
                e.metadata.file_type_code,
                e.metadata.path,
            ).message
            + ", use --overwrite to overwrite existing file(s)"
        )
    except pcs_file.RawFileError as e:
        utils.err(
            reports.messages.FileIoError(
                e.metadata.file_type_code,
                e.action,
                e.reason,
                file_path=e.metadata.path,
            ).message
        )
1851 	
1852 	
def config_update(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Update corosync configuration options, live or in a local file.

    Options:
      * --corosync_conf - corosync.conf file path, do not talk to cluster nodes
    """
    modifiers.ensure_only_supported("--corosync_conf")
    sections = ["transport", "compression", "crypto", "totem"]
    grouped = parse_args.group_by_keywords(argv, sections)

    # one name=value dict per section, parsed up front so input errors are
    # reported before any action is taken
    parsed = {
        section: KeyValueParser(grouped.get_args_flat(section)).get_unique()
        for section in sections
    }

    if modifiers.is_specified("--corosync_conf"):
        _corosync_conf_local_cmd_call(
            modifiers.get("--corosync_conf"),
            lambda corosync_conf_content: lib.cluster.config_update_local(
                corosync_conf_content,
                parsed["transport"],
                parsed["compression"],
                parsed["crypto"],
                parsed["totem"],
            ),
        )
        return

    lib.cluster.config_update(
        parsed["transport"],
        parsed["compression"],
        parsed["crypto"],
        parsed["totem"],
    )
1896 	
1897 	
def _format_options(label: str, options: Mapping[str, str]) -> list[str]:
    """Render a labeled, sorted option block; empty list when no options."""
    if not options:
        return []
    rendered = indent(
        [f"{key}: {value}" for key, value in sorted(options.items())]
    )
    return [f"{label}:"] + rendered
1906 	
1907 	
def _format_nodes(nodes: Iterable[CorosyncNodeDto]) -> list[str]:
    """Render the "Nodes:" section, nodes sorted by name, addrs by link."""
    output = ["Nodes:"]
    for node in sorted(nodes, key=lambda item: item.name):
        attr_lines = [
            f"Link {addr.link} address: {addr.addr}"
            for addr in sorted(node.addrs, key=lambda item: item.link)
        ]
        attr_lines.append(f"nodeid: {node.nodeid}")
        output.extend(indent([f"{node.name}:"] + indent(attr_lines)))
    return output
1917 	
1918 	
def config_show(
    lib: Any, argv: Argv, modifiers: parse_args.InputModifiers
) -> None:
    """
    Print the corosync configuration in the requested output format.

    Options:
      * --corosync_conf - corosync.conf file path, do not talk to cluster nodes
      * --output-format - supported formats: text, cmd, json
    """
    modifiers.ensure_only_supported(
        "--corosync_conf", output_format_supported=True
    )
    if argv:
        raise CmdLineInputError()
    # resolve the format first so an invalid value fails before any lib call
    output_format = modifiers.get_output_format()
    corosync_conf_dto = lib.cluster.get_corosync_conf_struct()
    if output_format == OUTPUT_FORMAT_VALUE_CMD:
        if corosync_conf_dto.quorum_device is not None:
            warn(
                "Quorum device configuration detected but not yet supported by "
                "this command."
            )
        print(" \\\n".join(_config_get_cmd(corosync_conf_dto)))
        return
    if output_format == OUTPUT_FORMAT_VALUE_JSON:
        print(json.dumps(dto.to_dict(corosync_conf_dto)))
        return
    print("\n".join(_config_get_text(corosync_conf_dto)))
1946 	
1947 	
def _config_get_text(corosync_conf: CorosyncConfDto) -> list[str]:
    """Build the human-readable text representation of the corosync config."""
    lines = [f"Cluster Name: {corosync_conf.cluster_name}"]
    if corosync_conf.cluster_uuid:
        lines.append(f"Cluster UUID: {corosync_conf.cluster_uuid}")
    lines.append(f"Transport: {corosync_conf.transport.lower()}")
    lines.extend(_format_nodes(corosync_conf.nodes))
    if corosync_conf.links_options:
        lines.append("Links:")
        for linknum, link_options in sorted(
            corosync_conf.links_options.items()
        ):
            lines.extend(
                indent(_format_options(f"Link {linknum}", link_options))
            )

    # fixed section order matches the historical output
    for label, section_options in (
        ("Transport Options", corosync_conf.transport_options),
        ("Compression Options", corosync_conf.compression_options),
        ("Crypto Options", corosync_conf.crypto_options),
        ("Totem Options", corosync_conf.totem_options),
        ("Quorum Options", corosync_conf.quorum_options),
    ):
        lines.extend(_format_options(label, section_options))

    if corosync_conf.quorum_device:
        device = corosync_conf.quorum_device
        lines.append(f"Quorum Device: {device.model}")
        for label, section_options in (
            ("Options", device.generic_options),
            ("Model Options", device.model_options),
            ("Heuristics", device.heuristics_options),
        ):
            lines.extend(indent(_format_options(label, section_options)))
    return lines
2004 	
2005 	
def _corosync_node_to_cmd_line(node: CorosyncNodeDto) -> str:
    """Format one node as a "name addr=... addr=..." setup argument."""
    addr_args = [
        f"addr={addr.addr}"
        for addr in sorted(node.addrs, key=lambda item: item.link)
    ]
    return " ".join([node.name] + addr_args)
2014 	
2015 	
def _section_to_lines(
    options: Mapping[str, str], keyword: Optional[str] = None
) -> list[str]:
    """Render sorted key=value lines, indented for the cmd output format."""
    output: list[str] = []
    if options:
        if keyword:
            output.append(keyword)
        output += indent(
            [f"{key}={value}" for key, value in sorted(options.items())]
        )
    return indent(output)
2027 	
2028 	
def _config_get_cmd(corosync_conf: CorosyncConfDto) -> list[str]:
    """Build a "pcs cluster setup" command line reproducing the config."""
    lines = [f"pcs cluster setup {corosync_conf.cluster_name}"]
    node_lines = [
        _corosync_node_to_cmd_line(node)
        for node in sorted(corosync_conf.nodes, key=lambda item: item.nodeid)
    ]
    lines += indent(node_lines)

    transport = ["transport", str(corosync_conf.transport.value).lower()]
    transport += _section_to_lines(corosync_conf.transport_options)
    # one "link" group per configured link, in link number order
    for _linknum, link_options in sorted(corosync_conf.links_options.items()):
        transport.extend(_section_to_lines(link_options, "link"))
    transport.extend(
        _section_to_lines(corosync_conf.compression_options, "compression")
    )
    transport.extend(_section_to_lines(corosync_conf.crypto_options, "crypto"))
    lines.extend(indent(transport))

    lines.extend(_section_to_lines(corosync_conf.totem_options, "totem"))
    lines.extend(_section_to_lines(corosync_conf.quorum_options, "quorum"))
    if not corosync_conf.cluster_uuid:
        lines.extend(indent(["--no-cluster-uuid"]))
    return lines
2055 	
2056 	
def _parse_add_node(argv: Argv) -> dict[str, Union[str, list[str]]]:
    """Parse "node add" arguments (hostname plus options) into a node dict."""
    DEVICE_KEYWORD = "device"  # pylint: disable=invalid-name
    WATCHDOG_KEYWORD = "watchdog"  # pylint: disable=invalid-name
    node_dict = _parse_node_options(
        argv[0],
        argv[1:],
        additional_options={DEVICE_KEYWORD, WATCHDOG_KEYWORD},
        additional_repeatable_options={DEVICE_KEYWORD},
    )
    # the library layer expects the plural "devices" key
    if DEVICE_KEYWORD in node_dict:
        node_dict[f"{DEVICE_KEYWORD}s"] = node_dict.pop(DEVICE_KEYWORD)
    return node_dict
2071 	
2072 	
def node_add(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Add a new node to the cluster.

    Options:
      * --wait - wait until new node will start up, effective only when --start
        is specified
      * --start - start new node
      * --enable - enable new node
      * --force - treat validation issues and not resolvable addresses as
        warnings instead of errors
      * --skip-offline - skip unreachable nodes
      * --no-watchdog-validation - do not validate watchdogs
      * --request-timeout - HTTP request timeout
    """
    modifiers.ensure_only_supported(
        "--wait",
        "--start",
        "--enable",
        "--force",
        "--skip-offline",
        "--no-watchdog-validation",
        "--request-timeout",
    )
    if not argv:
        raise CmdLineInputError()

    new_node = _parse_add_node(argv)

    force_flags = [
        flag
        for modifier, flag in (
            ("--force", reports.codes.FORCE),
            ("--skip-offline", reports.codes.SKIP_OFFLINE_NODES),
        )
        if modifiers.get(modifier)
    ]

    lib.cluster.add_nodes(
        nodes=[new_node],
        wait=modifiers.get("--wait"),
        start=modifiers.get("--start"),
        enable=modifiers.get("--enable"),
        no_watchdog_validation=modifiers.get("--no-watchdog-validation"),
        force_flags=force_flags,
    )
2114 	
2115 	
def remove_nodes_from_cib(
    lib: Any, argv: Argv, modifiers: InputModifiers
) -> None:
    """
    Remove the given nodes from the CIB.

    Options: no options
    """
    modifiers.ensure_only_supported()
    if argv:
        lib.cluster.remove_nodes_from_cib(argv)
        return
    raise CmdLineInputError("No nodes specified")
2126 	
2127 	
def link_add(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Add a corosync link.

    Options:
      * --force - treat validation issues and not resolvable addresses as
        warnings instead of errors
      * --skip-offline - skip unreachable nodes
      * --request-timeout - HTTP request timeout
    """
    modifiers.ensure_only_supported(
        "--force", "--request-timeout", "--skip-offline"
    )
    if not argv:
        raise CmdLineInputError()

    force_flags = [
        flag
        for modifier, flag in (
            ("--force", reports.codes.FORCE),
            ("--skip-offline", reports.codes.SKIP_OFFLINE_NODES),
        )
        if modifiers.get(modifier)
    ]

    # node addresses come before the "options" keyword
    grouped = parse_args.group_by_keywords(
        argv, {"options"}, implicit_first_keyword="nodes"
    )
    grouped.ensure_unique_keywords()

    lib.cluster.add_link(
        KeyValueParser(grouped.get_args_flat("nodes")).get_unique(),
        KeyValueParser(grouped.get_args_flat("options")).get_unique(),
        force_flags=force_flags,
    )
2158 	
2159 	
def link_remove(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Remove the given corosync links.

    Options:
      * --skip-offline - skip unreachable nodes
      * --request-timeout - HTTP request timeout
    """
    modifiers.ensure_only_supported("--request-timeout", "--skip-offline")
    if not argv:
        raise CmdLineInputError()

    force_flags = (
        [reports.codes.SKIP_OFFLINE_NODES]
        if modifiers.get("--skip-offline")
        else []
    )
    lib.cluster.remove_links(argv, force_flags=force_flags)
2175 	
2176 	
def link_update(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Update a corosync link.

    Options:
      * --force - treat validation issues and not resolvable addresses as
        warnings instead of errors
      * --skip-offline - skip unreachable nodes
      * --request-timeout - HTTP request timeout
    """
    modifiers.ensure_only_supported(
        "--force", "--request-timeout", "--skip-offline"
    )
    # a link number plus at least one node address or option is required
    if len(argv) < 2:
        raise CmdLineInputError()

    force_flags = [
        flag
        for modifier, flag in (
            ("--force", reports.codes.FORCE),
            ("--skip-offline", reports.codes.SKIP_OFFLINE_NODES),
        )
        if modifiers.get(modifier)
    ]

    linknumber, *link_args = argv
    grouped = parse_args.group_by_keywords(
        link_args, {"options"}, implicit_first_keyword="nodes"
    )
    grouped.ensure_unique_keywords()

    node_addrs = KeyValueParser(grouped.get_args_flat("nodes")).get_unique()
    link_options = KeyValueParser(
        grouped.get_args_flat("options")
    ).get_unique()
    lib.cluster.update_link(
        linknumber,
        node_addrs,
        link_options,
        force_flags=force_flags,
    )
2209 	
2210 	
def generate_uuid(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Generate a cluster UUID and store it in corosync.conf.

    Options:
      * --force - allow to rewrite an existing UUID in corosync.conf
      * --corosync_conf - corosync.conf file path, do not talk to cluster nodes
    """
    modifiers.ensure_only_supported("--force", "--corosync_conf")
    if argv:
        raise CmdLineInputError()

    force_flags = [reports.codes.FORCE] if modifiers.get("--force") else []

    if modifiers.is_specified("--corosync_conf"):
        _corosync_conf_local_cmd_call(
            modifiers.get("--corosync_conf"),
            lambda conf_content: lib.cluster.generate_cluster_uuid_local(
                conf_content, force_flags=force_flags
            ),
        )
        return
    lib.cluster.generate_cluster_uuid(force_flags=force_flags)
2235