1 # pylint: disable=too-many-lines
2 import contextlib
3 import datetime
4 import json
5 import math
6 import os
7 import subprocess
8 import sys
9 import tempfile
10 import time
11 import xml.dom.minidom
12 from typing import Any, Callable, Iterable, Mapping, Optional, Union, cast
13 from xml.parsers.expat import ExpatError
14
15 import pcs.lib.pacemaker.live as lib_pacemaker
16 from pcs import settings, utils
17 from pcs.cli.common import parse_args
18 from pcs.cli.common.errors import (
19 ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE,
20 CmdLineInputError,
21 )
22 from pcs.cli.common.parse_args import (
23 OUTPUT_FORMAT_VALUE_CMD,
24 OUTPUT_FORMAT_VALUE_JSON,
25 Argv,
26 InputModifiers,
27 KeyValueParser,
28 )
29 from pcs.cli.common.tools import print_to_stderr
30 from pcs.cli.file import metadata as file_metadata
31 from pcs.cli.reports import process_library_reports
32 from pcs.cli.reports.messages import report_item_msg_from_dto
33 from pcs.cli.reports.output import deprecation_warning, warn
34 from pcs.common import file as pcs_file
35 from pcs.common import file_type_codes, reports
36 from pcs.common.auth import HostAuthData
37 from pcs.common.corosync_conf import CorosyncConfDto, CorosyncNodeDto
38 from pcs.common.file import RawFileError
39 from pcs.common.host import Destination
40 from pcs.common.interface import dto
41 from pcs.common.node_communicator import HostNotFound, Request, RequestData
42 from pcs.common.str_tools import format_list, indent, join_multilines
43 from pcs.common.tools import format_os_error
44 from pcs.common.types import StringCollection, StringIterable
45 from pcs.lib import sbd as lib_sbd
46 from pcs.lib.commands.remote_node import _destroy_pcmk_remote_env
47 from pcs.lib.communication.nodes import CheckAuth
48 from pcs.lib.communication.pcs_cfgsync import SetConfigs
49 from pcs.lib.communication.tools import RunRemotelyBase, run_and_raise
50 from pcs.lib.communication.tools import run as run_com_cmd
51 from pcs.lib.corosync import qdevice_net
52 from pcs.lib.corosync.live import QuorumStatusException, QuorumStatusFacade
53 from pcs.lib.errors import LibraryError
54 from pcs.lib.file.instance import FileInstance
55 from pcs.lib.file.raw_file import raw_file_error_report
56 from pcs.lib.node import get_existing_nodes_names
57 from pcs.lib.pcs_cfgsync.const import SYNCED_CONFIGS
58 from pcs.utils import parallel_for_nodes
59
60
def _corosync_conf_local_cmd_call(
    corosync_conf_path: parse_args.ModifierValueType,
    lib_cmd: Callable[[bytes], bytes],
) -> None:
    """
    Run a library command that transforms the content of a corosync.conf
    file supplied on the command line, writing the result back to the file.

    The lib command must accept the corosync.conf file content (bytes) as
    its first argument and return the new content.

    lib_cmd -- the lib command to be called
    """
    metadata = file_metadata.for_file_type(
        file_type_codes.COROSYNC_CONF, corosync_conf_path
    )
    conf_file = pcs_file.RawFile(metadata)

    try:
        new_content = lib_cmd(conf_file.read())
        conf_file.write(new_content, can_overwrite=True)
    except pcs_file.RawFileError as e:
        # translate the file error into a CLI error message
        raise CmdLineInputError(
            reports.messages.FileIoError(
                e.metadata.file_type_code,
                e.action,
                e.reason,
                file_path=e.metadata.path,
            ).message
        ) from e
96
97
def cluster_cib_upgrade_cmd(
    lib: Any, argv: Argv, modifiers: InputModifiers
) -> None:
    """
    Upgrade the CIB schema.

    Options:
      * -f - CIB file
    """
    del lib
    modifiers.ensure_only_supported("-f")
    # this command takes no positional arguments
    if argv:
        raise CmdLineInputError()
    utils.cluster_upgrade()
110
111
def cluster_disable_cmd(
    lib: Any, argv: Argv, modifiers: InputModifiers
) -> None:
    """
    Disable cluster services on the listed nodes or on all nodes.

    Options:
      * --all - disable all cluster nodes
      * --request-timeout - timeout for HTTP requests - effective only when at
        least one node has been specified or --all has been used
    """
    del lib
    modifiers.ensure_only_supported("--all", "--request-timeout")
    if not modifiers.get("--all"):
        disable_cluster(argv)
        return
    # --all and an explicit node list cannot be combined
    if argv:
        utils.err(ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE)
    disable_cluster_all()
129
130
def cluster_enable_cmd(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Enable cluster services on the listed nodes or on all nodes.

    Options:
      * --all - enable all cluster nodes
      * --request-timeout - timeout for HTTP requests - effective only when at
        least one node has been specified or --all has been used
    """
    del lib
    modifiers.ensure_only_supported("--all", "--request-timeout")
    if not modifiers.get("--all"):
        enable_cluster(argv)
        return
    # --all and an explicit node list cannot be combined
    if argv:
        utils.err(ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE)
    enable_cluster_all()
146
147
def cluster_stop_cmd(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Stop cluster services on this node, the listed nodes, or all nodes.

    Options:
      * --force - no error when possible quorum loss
      * --request-timeout - timeout for HTTP requests - effective only when at
        least one node has been specified
      * --pacemaker - stop pacemaker, only effective when no node has been
        specified
      * --corosync - stop corosync, only effective when no node has been
        specified
      * --all - stop all cluster nodes
      * --wait - accepted by this command; NOTE(review): not read by
        stop_cluster / stop_cluster_all in this file - confirm it has an
        effect somewhere before documenting it further
    """
    del lib
    modifiers.ensure_only_supported(
        "--wait",
        "--request-timeout",
        "--pacemaker",
        "--corosync",
        "--all",
        "--force",
    )
    if modifiers.get("--all"):
        # --all and an explicit node list cannot be combined
        if argv:
            utils.err(ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE)
        stop_cluster_all()
    else:
        stop_cluster(argv)
175
176
def cluster_start_cmd(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Start cluster services on this node, the listed nodes, or all nodes.

    Options:
      * --wait
      * --request-timeout - timeout for HTTP requests, have effect only if at
        least one node have been specified
      * --all - start all cluster nodes
      * --corosync_conf - accepted by this command; NOTE(review): not read in
        this function - presumably consumed via utils.pcs_options downstream,
        confirm before documenting its semantics
    """
    del lib
    modifiers.ensure_only_supported(
        "--wait", "--request-timeout", "--all", "--corosync_conf"
    )
    if modifiers.get("--all"):
        # --all and an explicit node list cannot be combined
        if argv:
            utils.err(ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE)
        start_cluster_all()
    else:
        start_cluster(argv)
195
196
def authkey_corosync(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Change the corosync authkey, optionally loading it from a file.

    Options:
      * --force - skip check for authkey length
      * --request-timeout - timeout for HTTP requests
      * --skip-offline - skip unreachable nodes
    """
    modifiers.ensure_only_supported(
        "--force", "--skip-offline", "--request-timeout"
    )
    # at most one positional argument: a path to an authkey file
    if len(argv) > 1:
        raise CmdLineInputError()

    force_flags = []
    if modifiers.get("--force"):
        force_flags.append(reports.codes.FORCE)
    if modifiers.get("--skip-offline"):
        force_flags.append(reports.codes.SKIP_OFFLINE_NODES)

    corosync_authkey = None
    if argv:
        key_path = argv[0]
        try:
            with open(key_path, "rb") as key_file:
                corosync_authkey = key_file.read()
        except OSError as e:
            utils.err(f"Unable to read file '{key_path}': {format_os_error(e)}")

    lib.cluster.corosync_authkey_change(
        corosync_authkey=corosync_authkey,
        force_flags=force_flags,
    )
225
226
def sync_nodes(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Push the local corosync.conf to all nodes of the cluster.

    Options:
      * --request-timeout - timeout for HTTP requests
    """
    del lib
    modifiers.ensure_only_supported("--request-timeout")
    if argv:
        raise CmdLineInputError()

    conf_text = utils.getCorosyncConf()
    nodes, report_list = get_existing_nodes_names(
        utils.get_corosync_conf_facade(conf_text=conf_text)
    )
    if not nodes:
        no_nodes_msg = reports.messages.CorosyncConfigNoNodesDefined()
        report_list.append(reports.ReportItem.error(no_nodes_msg))
    if report_list:
        process_library_reports(report_list)

    for node in nodes:
        utils.setCorosyncConfig(node, conf_text)

    warn(
        "Corosync configuration has been synchronized, please reload corosync "
        "daemon using 'pcs cluster reload corosync' command."
    )
257
258
def start_cluster(argv: Argv) -> None:
    """
    Start cluster services on the listed nodes, or locally if none given.

    Commandline options:
      * --wait
      * --request-timeout - timeout for HTTP requests, have effect only if at
        least one node have been specified
    """
    wait = "--wait" in utils.pcs_options
    wait_timeout = utils.validate_wait_get_timeout(False) if wait else None

    if argv:
        nodes = set(argv)  # deduplicate node names
        start_cluster_nodes(nodes)
        if wait:
            wait_for_nodes_started(nodes, wait_timeout)
        return

    # no nodes specified - start cluster services on the local node
    if not utils.hasCorosyncConf():
        utils.err("cluster is not currently configured on this node")

    print_to_stderr("Starting Cluster...")
    # corosync (and qdevice, if used) must be up before pacemaker
    service_list = ["corosync"]
    if utils.need_to_handle_qdevice_service():
        service_list.append("corosync-qdevice")
    service_list.append("pacemaker")
    for service in service_list:
        utils.start_service(service)
    if wait:
        wait_for_nodes_started([], wait_timeout)
291
292
def start_cluster_all() -> None:
    """
    Start cluster services on all nodes of the cluster.

    Commandline options:
      * --wait
      * --request-timeout - timeout for HTTP requests
    """
    wait = "--wait" in utils.pcs_options
    wait_timeout = utils.validate_wait_get_timeout(False) if wait else None

    all_nodes, report_list = get_existing_nodes_names(
        utils.get_corosync_conf_facade()
    )
    if not all_nodes:
        no_nodes_msg = reports.messages.CorosyncConfigNoNodesDefined()
        report_list.append(reports.ReportItem.error(no_nodes_msg))
    if report_list:
        process_library_reports(report_list)

    start_cluster_nodes(all_nodes)
    if wait:
        wait_for_nodes_started(all_nodes, wait_timeout)
320
321
def start_cluster_nodes(nodes: StringCollection) -> None:
    """
    Start cluster services on the given nodes in parallel.

    Commandline options:
      * --request-timeout - timeout for HTTP requests
    """
    # Large clusters take longer to start up, so scale the timeout up by one
    # default_request_timeout for every started group of 8 nodes:
    #   1 - 8 nodes: 1 * timeout
    #   9 - 16 nodes: 2 * timeout
    #   17 - 24 nodes: 3 * timeout
    # and so on. Users can override this and set their own timeout by
    # specifying the --request-timeout option (see utils.sendHTTPRequest).
    node_groups = math.ceil(len(nodes) / 8.0)
    timeout = int(settings.default_request_timeout * node_groups)
    utils.read_known_hosts_file()  # cache known hosts
    node_errors = parallel_for_nodes(
        utils.startCluster, nodes, quiet=True, timeout=timeout
    )
    if node_errors:
        utils.err(
            "unable to start all nodes\n" + "\n".join(node_errors.values())
        )
346
347
def is_node_fully_started(node_status) -> bool:
    """
    Tell whether a node status mapping describes a fully started node.

    A node counts as fully started only when its status contains both the
    "online" and "pending" keys, it is online, and it is not pending.

    Commandline options: no options
    """
    keys_present = "online" in node_status and "pending" in node_status
    return (
        keys_present
        and node_status["online"]
        and not node_status["pending"]
    )
358
359
def wait_for_local_node_started(
    stop_at: datetime.datetime, interval: float
) -> tuple[int, str]:
    """
    Poll the local node until pacemaker reports it fully started.

    Return (0, "Started") on success, (1, <error message>) on timeout or
    when the node status cannot be obtained.

    stop_at -- give up once the current time passes this point
    interval -- seconds to sleep between status polls

    Commandline options: no options
    """
    try:
        while True:
            # sleep first - the node was just (re)started, so an immediate
            # status query would be wasted
            time.sleep(interval)
            node_status = lib_pacemaker.get_local_node_status(
                utils.cmd_runner()
            )
            if is_node_fully_started(node_status):
                return 0, "Started"
            if datetime.datetime.now() > stop_at:
                return 1, "Waiting timeout"
    except LibraryError as e:
        # e.args carries report item DTOs; render them as readable messages
        return (
            1,
            "Unable to get node status: {0}".format(
                "\n".join(
                    report_item_msg_from_dto(
                        cast(reports.ReportItemDto, item).message
                    ).message
                    for item in e.args
                )
            ),
        )
388
389
def wait_for_remote_node_started(
    node: str, stop_at: datetime.datetime, interval: float
) -> tuple[int, str]:
    """
    Poll a remote node over HTTP until pacemaker reports it fully started.

    Return (0, "Started") on success, (1, <error message>) otherwise.

    node -- name of the node to poll
    stop_at -- give up once the current time passes this point
    interval -- seconds to sleep between status polls

    Commandline options:
      * --request-timeout - timeout for HTTP requests
    """
    while True:
        time.sleep(interval)
        code, output = utils.getPacemakerNodeStatus(node)
        # HTTP error, permission denied or unable to auth
        # there is no point in trying again as it won't get magically fixed
        if code in [1, 3, 4]:
            return 1, output
        if code == 0:
            try:
                node_status = json.loads(output)
                if is_node_fully_started(node_status):
                    return 0, "Started"
            except (ValueError, KeyError):
                # malformed status response - this won't get fixed either
                return 1, "Unable to get node status"
        if datetime.datetime.now() > stop_at:
            return 1, "Waiting timeout"
414
415
def wait_for_nodes_started(
    node_list: StringIterable, timeout: Optional[int] = None
) -> None:
    """
    Wait until the given nodes (or the local node, if none given) start.

    node_list -- nodes to wait for; empty means wait for the local node
    timeout -- overall timeout in seconds; default is 15 minutes

    Commandline options:
      * --request-timeout - timeout for HTTP request, effective only if
        node_list is not empty list
    """
    if timeout is None:
        timeout = 60 * 15
    interval = 2
    stop_at = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
    print_to_stderr("Waiting for node(s) to start...")
    if node_list:
        utils.read_known_hosts_file()  # cache known hosts
        node_errors = parallel_for_nodes(
            wait_for_remote_node_started, node_list, stop_at, interval
        )
        if node_errors:
            utils.err("unable to verify all nodes have started")
    else:
        code, output = wait_for_local_node_started(stop_at, interval)
        if code != 0:
            utils.err(output)
        else:
            print_to_stderr(output)
441
442
def stop_cluster_all() -> None:
    """
    Stop cluster services on all nodes of the cluster.

    Commandline options:
      * --force - no error when possible quorum loss
      * --request-timeout - timeout for HTTP requests
    """
    all_nodes, report_list = get_existing_nodes_names(
        utils.get_corosync_conf_facade()
    )
    if not all_nodes:
        no_nodes_msg = reports.messages.CorosyncConfigNoNodesDefined()
        report_list.append(reports.ReportItem.error(no_nodes_msg))
    if report_list:
        process_library_reports(report_list)

    stop_cluster_nodes(all_nodes)
462
463
def stop_cluster_nodes(nodes: StringCollection) -> None:  # noqa: PLR0912
    """
    Stop cluster services on the given nodes: pacemaker first, then corosync.

    Unless --force is used or all nodes are being stopped, first verify that
    stopping the nodes will not make the cluster lose quorum.

    Commandline options:
      * --force - no error when possible quorum loss
      * --request-timeout - timeout for HTTP requests
    """
    # pylint: disable=too-many-branches
    all_nodes, report_list = get_existing_nodes_names(
        utils.get_corosync_conf_facade()
    )
    unknown_nodes = set(nodes) - set(all_nodes)
    if unknown_nodes:
        if report_list:
            process_library_reports(report_list)
        utils.err(
            "nodes '%s' do not appear to exist in configuration"
            % "', '".join(sorted(unknown_nodes))
        )

    utils.read_known_hosts_file()  # cache known hosts
    # When every node goes down there is no quorum left to preserve, so the
    # quorum loss check below is skipped in that case.
    stopping_all = set(nodes) >= set(all_nodes)
    if "--force" not in utils.pcs_options and not stopping_all:
        error_list = []
        for node in nodes:
            retval, data = utils.get_remote_quorumtool_output(node)
            if retval != 0:
                error_list.append(node + ": " + data)
                continue
            try:
                quorum_status_facade = QuorumStatusFacade.from_string(data)
                if not quorum_status_facade.is_quorate:
                    # Get quorum status from a quorate node, non-quorate nodes
                    # may provide inaccurate info. If no node is quorate, there
                    # is no quorum to be lost and therefore no error to be
                    # reported.
                    continue
                if quorum_status_facade.stopping_nodes_cause_quorum_loss(nodes):
                    utils.err(
                        "Stopping the node(s) will cause a loss of the quorum"
                        + ", use --force to override"
                    )
                else:
                    # We have the info, no need to print errors
                    error_list = []
                    break
            except QuorumStatusException:
                if not utils.is_node_offline_by_quorumtool_output(data):
                    error_list.append(node + ": Unable to get quorum status")
                # else the node seems to be stopped already
        if error_list:
            utils.err(
                "Unable to determine whether stopping the nodes will cause "
                + "a loss of the quorum, use --force to override\n"
                + "\n".join(error_list)
            )

    was_error = False
    # Stop pacemaker on all nodes in parallel first, so resources can be
    # moved away while corosync (and therefore quorum) is still up.
    node_errors = parallel_for_nodes(
        utils.repeat_if_timeout(utils.stopPacemaker), nodes, quiet=True
    )
    accessible_nodes = [node for node in nodes if node not in node_errors]
    if node_errors:
        # keep going if at least one node is reachable; exit otherwise
        utils.err(
            "unable to stop all nodes\n" + "\n".join(node_errors.values()),
            exit_after_error=not accessible_nodes,
        )
        was_error = True

    for node in node_errors:
        print_to_stderr(
            "{0}: Not stopping cluster - node is unreachable".format(node)
        )

    # only then stop corosync, and only on the nodes that were reachable
    node_errors = parallel_for_nodes(
        utils.stopCorosync, accessible_nodes, quiet=True
    )
    if node_errors:
        utils.err(
            "unable to stop all nodes\n" + "\n".join(node_errors.values())
        )
    if was_error:
        utils.err("unable to stop all nodes")
546
547
def enable_cluster(argv: Argv) -> None:
    """
    Enable cluster services on the listed nodes, or locally if none given.

    Commandline options:
      * --request-timeout - timeout for HTTP requests, effective only if at
        least one node has been specified
    """
    if not argv:
        # no nodes given - enable services on the local node only
        try:
            utils.enableServices()
        except LibraryError as e:
            process_library_reports(list(e.args))
        return
    enable_cluster_nodes(argv)
562
563
def disable_cluster(argv: Argv) -> None:
    """
    Disable cluster services on the listed nodes, or locally if none given.

    Commandline options:
      * --request-timeout - timeout for HTTP requests, effective only if at
        least one node has been specified
    """
    if not argv:
        # no nodes given - disable services on the local node only
        try:
            utils.disableServices()
        except LibraryError as e:
            process_library_reports(list(e.args))
        return
    disable_cluster_nodes(argv)
578
579
def enable_cluster_all() -> None:
    """
    Enable cluster services on all nodes of the cluster.

    Commandline options:
      * --request-timeout - timeout for HTTP requests
    """
    all_nodes, report_list = get_existing_nodes_names(
        utils.get_corosync_conf_facade()
    )
    if not all_nodes:
        no_nodes_msg = reports.messages.CorosyncConfigNoNodesDefined()
        report_list.append(reports.ReportItem.error(no_nodes_msg))
    if report_list:
        process_library_reports(report_list)

    enable_cluster_nodes(all_nodes)
598
599
def disable_cluster_all() -> None:
    """
    Disable cluster services on all nodes of the cluster.

    Commandline options:
      * --request-timeout - timeout for HTTP requests
    """
    all_nodes, report_list = get_existing_nodes_names(
        utils.get_corosync_conf_facade()
    )
    if not all_nodes:
        no_nodes_msg = reports.messages.CorosyncConfigNoNodesDefined()
        report_list.append(reports.ReportItem.error(no_nodes_msg))
    if report_list:
        process_library_reports(report_list)

    disable_cluster_nodes(all_nodes)
618
619
def enable_cluster_nodes(nodes: StringIterable) -> None:
    """
    Enable cluster services on every node in the given list.

    Commandline options:
      * --request-timeout - timeout for HTTP requests
    """
    failures = utils.map_for_error_list(utils.enableCluster, nodes)
    if failures:
        utils.err("unable to enable all nodes\n" + "\n".join(failures))
628
629
def disable_cluster_nodes(nodes: StringIterable) -> None:
    """
    Disable cluster services on every node in the given list.

    Commandline options:
      * --request-timeout - timeout for HTTP requests
    """
    failures = utils.map_for_error_list(utils.disableCluster, nodes)
    if failures:
        utils.err("unable to disable all nodes\n" + "\n".join(failures))
638
639
def destroy_cluster(argv: Argv) -> None:
    """
    Destroy the cluster on the given nodes: stop pacemaker first, then wipe
    the cluster configuration on each node. Does nothing when no nodes are
    given.

    Commandline options:
      * --request-timeout - timeout for HTTP requests
    """
    if not argv:
        return
    utils.read_known_hosts_file()  # cache known hosts
    nodes = argv
    # Stop pacemaker and resources while the cluster is still quorate. Errors
    # are deliberately ignored (the original assigned the result to a
    # variable that was immediately overwritten) - destroy will stop any
    # remaining cluster daemons anyway.
    parallel_for_nodes(
        utils.repeat_if_timeout(utils.stopPacemaker), nodes, quiet=True
    )
    node_errors = parallel_for_nodes(utils.destroyCluster, nodes, quiet=True)
    if node_errors:
        utils.err(
            "unable to destroy cluster\n" + "\n".join(node_errors.values())
        )
661
662
def stop_cluster(argv: Argv) -> None:
    """
    Stop cluster services on the listed nodes, or locally if none given.

    Without --force, first check that stopping the local node will not make
    the cluster lose quorum.

    Commandline options:
      * --force - no error when possible quorum loss
      * --request-timeout - timeout for HTTP requests - effective only when at
        least one node has been specified
      * --pacemaker - stop pacemaker, only effective when no node has been
        specified
      * --corosync - stop corosync, only effective when no node has been
        specified
    """
    if argv:
        stop_cluster_nodes(argv)
        return

    if "--force" not in utils.pcs_options:
        # The retval of corosync-quorumtool is version dependent, so it is
        # ignored and the output is parsed instead:
        # corosync 3.0.1 and older:
        # - retval is 0 on success if a node is not in a partition with quorum
        # - retval is 1 on error OR on success if a node has quorum
        # corosync 3.0.2 and newer:
        # - retval is 0 on success if a node has quorum
        # - retval is 1 on error
        # - retval is 2 on success if a node is not in a partition with quorum
        output, dummy_retval = utils.run(["corosync-quorumtool", "-p", "-s"])
        try:
            if QuorumStatusFacade.from_string(
                output
            ).stopping_local_node_cause_quorum_loss():
                utils.err(
                    "Stopping the node will cause a loss of the quorum"
                    + ", use --force to override"
                )
        except QuorumStatusException:
            if not utils.is_node_offline_by_quorumtool_output(output):
                utils.err(
                    "Unable to determine whether stopping the node will cause "
                    + "a loss of the quorum, use --force to override"
                )
            # else the node seems to be stopped already, proceed to be sure

    # with no service selection options given, stop both daemons
    stop_all = (
        "--pacemaker" not in utils.pcs_options
        and "--corosync" not in utils.pcs_options
    )
    if stop_all or "--pacemaker" in utils.pcs_options:
        stop_cluster_pacemaker()
    if stop_all or "--corosync" in utils.pcs_options:
        stop_cluster_corosync()
709
710
def stop_cluster_pacemaker() -> None:
    """
    Stop the pacemaker service on the local node.

    Commandline options: no options
    """
    print_to_stderr("Stopping Cluster (pacemaker)...")
    utils.stop_service("pacemaker")
717
718
def stop_cluster_corosync() -> None:
    """
    Stop corosync (and corosync-qdevice, if in use) on the local node.

    Commandline options: no options
    """
    print_to_stderr("Stopping Cluster (corosync)...")
    # qdevice, when handled, goes down before corosync itself
    services = (
        ["corosync-qdevice"] if utils.need_to_handle_qdevice_service() else []
    )
    services.append("corosync")
    for service in services:
        utils.stop_service(service)
730
731
def kill_cluster(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Forcibly kill all cluster daemons on the local node.

    Options: no options
    """
    del lib
    if argv:
        raise CmdLineInputError()
    modifiers.ensure_only_supported()
    # The killall return value is deliberately ignored: some of the daemons
    # may legitimately not be running. (Removed long-dead commented-out
    # error-handling code that used to follow this call.)
    kill_local_cluster_services()
748
def kill_local_cluster_services() -> tuple[str, int]:
    """
    Send SIGKILL to every known cluster daemon on the local node.

    Return the (output, retval) pair of the killall run.

    Commandline options: no options
    """
    # Daemons taken from cluster-clean script in pacemaker
    pacemaker_daemons = [
        "pacemaker-attrd",
        "pacemaker-based",
        "pacemaker-controld",
        "pacemaker-execd",
        "pacemaker-fenced",
        "pacemaker-remoted",
        "pacemaker-schedulerd",
        "pacemakerd",
        "dlm_controld",
        "gfs_controld",
    ]
    # Corosync daemons
    corosync_daemons = ["corosync-qdevice", "corosync"]
    return utils.run(
        [settings.killall_exec, "-9"] + pacemaker_daemons + corosync_daemons
    )
770
771
def cluster_push(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:  # noqa: PLR0912, PLR0915
    """
    Push a CIB stored in a file to the cluster.

    The CIB is either replaced as a whole (optionally only one scope of it),
    or, with diff-against=<file>, only the difference between the two files
    is applied as a patch.

    Options:
      * --wait
      * --config - push only configuration section of CIB
      * -f - CIB file
    """
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-statements

    def get_details_from_crm_verify():
        # get a new runner to run crm_verify command and pass the CIB filename
        # into it so that the verify is run on the file instead on the live
        # cluster CIB
        verify_runner = utils.cmd_runner(cib_file_override=filename)
        # Request verbose output, otherwise we may only get an unhelpful
        # message:
        # Configuration invalid (with errors) (-V may provide more detail)
        # The returncode is always expected to be non-zero to indicate an
        # invalid CIB - we run the verify because the CIB is invalid.
        verify_stdout, verify_stderr, dummy_retval, dummy_verbose = (
            lib_pacemaker.verify(verify_runner, verbose=True)
        )
        return join_multilines([verify_stdout, verify_stderr])

    del lib
    modifiers.ensure_only_supported("--wait", "--config", "-f")
    if len(argv) > 2:
        raise CmdLineInputError()

    filename = None
    scope = None
    timeout = None
    diff_against = None

    if modifiers.get("--wait"):
        timeout = utils.validate_wait_get_timeout()
    # positional args: the file to push, plus optional scope=<scope> and
    # diff-against=<file> key=value arguments
    for arg in argv:
        if "=" not in arg:
            filename = arg
        else:
            arg_name, arg_value = arg.split("=", 1)
            if arg_name == "scope":
                if modifiers.get("--config"):
                    utils.err("Cannot use both scope and --config")
                if not utils.is_valid_cib_scope(arg_value):
                    utils.err("invalid CIB scope '%s'" % arg_value)
                else:
                    scope = arg_value
            elif arg_name == "diff-against":
                diff_against = arg_value
            else:
                raise CmdLineInputError()
    if modifiers.get("--config"):
        scope = "configuration"
    if diff_against and scope:
        utils.err("Cannot use both scope and diff-against")
    if not filename:
        raise CmdLineInputError()

    try:
        # NOTE(security): xml.dom.minidom offers no protection against
        # maliciously constructed XML (e.g. entity expansion). The file is
        # supplied by the local user here; if this ever parses untrusted
        # input, switch to the defusedxml package.
        new_cib_dom = xml.dom.minidom.parse(filename)
        if scope and not new_cib_dom.getElementsByTagName(scope):
            utils.err(
                "unable to push cib, scope '%s' not present in new cib" % scope
            )
    except (OSError, ExpatError) as e:
        utils.err("unable to parse new cib: %s" % e)

    EXITCODE_INVALID_CIB = 78
    runner = utils.cmd_runner()

    if diff_against:
        command = [
            settings.crm_diff_exec,
            "--original",
            diff_against,
            "--new",
            filename,
            "--no-version",
        ]
        patch, stderr, retval = runner.run(command)
        # crm_diff exit codes:
        # 0 (CRM_EX_OK) - success with no difference
        # 1 (CRM_EX_ERROR) - success with difference
        # 64 (CRM_EX_USAGE) - usage error
        # 65 (CRM_EX_DATAERR) - XML fragments not parseable
        if retval > 1:
            utils.err("unable to diff the CIBs:\n" + stderr)
        if retval == 0:
            print_to_stderr(
                "The new CIB is the same as the original CIB, nothing to push."
            )
            sys.exit(0)

        command = [
            settings.cibadmin_exec,
            "--patch",
            "--xml-pipe",
        ]
        output, stderr, retval = runner.run(command, patch)
        if retval != 0:
            push_output = stderr + output
            # on an invalid CIB, run crm_verify to provide more details
            verify_output = (
                get_details_from_crm_verify()
                if retval == EXITCODE_INVALID_CIB
                else ""
            )
            error_text = (
                f"{push_output}\n\n{verify_output}"
                if verify_output.strip()
                else push_output
            )
            utils.err("unable to push cib\n" + error_text)

    else:
        command = ["cibadmin", "--replace", "--xml-file", filename]
        if scope:
            command.append("--scope=%s" % scope)
        output, retval = utils.run(command)
        # 103 (CRM_EX_OLD) - update older than existing config
        if retval == 103:
            utils.err(
                "Unable to push to the CIB because pushed configuration "
                "is older than existing one. If you are sure you want to "
                "push this configuration, try to use --config to replace only "
                "configuration part instead of whole CIB. Otherwise get current"
                " configuration by running command 'pcs cluster cib' and update"
                " that."
            )
        elif retval != 0:
            # on an invalid CIB, run crm_verify to provide more details
            verify_output = (
                get_details_from_crm_verify()
                if retval == EXITCODE_INVALID_CIB
                else ""
            )
            error_text = (
                f"{output}\n\n{verify_output}"
                if verify_output.strip()
                else output
            )
            utils.err("unable to push cib\n" + error_text)

    print_to_stderr("CIB updated")
    # best-effort verification of the pushed CIB; failures to verify are
    # reported but do not fail the push
    try:
        cib_errors = lib_pacemaker.get_cib_verification_errors(runner)
        if cib_errors:
            print_to_stderr("\n".join(cib_errors))
    except lib_pacemaker.BadApiResultFormat as e:
        print_to_stderr(
            f"Unable to verify CIB: {e.original_exception}\n"
            f"crm_verify output:\n{e.pacemaker_response}"
        )

    if not modifiers.is_specified("--wait"):
        return
    # wait for the cluster to settle after the push
    cmd = ["crm_resource", "--wait"]
    if timeout:
        cmd.extend(["--timeout", str(timeout)])
    output, retval = utils.run(cmd)
    if retval != 0:
        msg = []
        if retval == settings.pacemaker_wait_timeout_status:
            msg.append("waiting timeout")
        if output:
            msg.append("\n" + output)
        utils.err("\n".join(msg).strip())
942
943
def cluster_edit(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:  # noqa: PLR0912
    """
    Open the CIB (or a scope of it) in $EDITOR and push the edited result.

    Options:
      * --config - edit configuration section of CIB
      * -f - CIB file
      * --wait
    """
    # pylint: disable=too-many-branches
    modifiers.ensure_only_supported("--config", "--wait", "-f")
    if "EDITOR" in os.environ:
        if len(argv) > 1:
            raise CmdLineInputError()

        scope = None
        scope_arg = ""
        for arg in argv:
            if "=" not in arg:
                raise CmdLineInputError()
            arg_name, arg_value = arg.split("=", 1)
            if arg_name == "scope" and not modifiers.get("--config"):
                if not utils.is_valid_cib_scope(arg_value):
                    utils.err("invalid CIB scope '%s'" % arg_value)
                else:
                    # remember the raw argument so it can be forwarded to
                    # cluster_push unchanged
                    scope_arg = arg
                    scope = arg_value
            else:
                raise CmdLineInputError()
        if modifiers.get("--config"):
            scope = "configuration"
            # Leave scope_arg empty as cluster_push will pick up a --config
            # option from utils.pcs_options
            scope_arg = ""

        editor = os.environ["EDITOR"]
        cib = utils.get_cib(scope)
        # the temporary file is removed when the context manager exits
        with tempfile.NamedTemporaryFile(mode="w+", suffix=".pcs") as tempcib:
            tempcib.write(cib)
            tempcib.flush()
            try:
                # blocks until the user closes the editor
                subprocess.call([editor, tempcib.name])
            except OSError:
                utils.err("unable to open file with $EDITOR: " + editor)

            tempcib.seek(0)
            newcib = "".join(tempcib.readlines())
            if newcib == cib:
                print_to_stderr("CIB not updated, no changes detected")
            else:
                cluster_push(
                    lib,
                    [arg for arg in [tempcib.name, scope_arg] if arg],
                    modifiers.get_subset("--wait", "--config", "-f"),
                )

    else:
        utils.err("$EDITOR environment variable is not set")
1000
1001
def get_cib(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:  # noqa: PLR0912
    """
    Print the CIB (or a scope of it), or write it to a file.

    Options:
      * --config show configuration section of CIB
      * -f - CIB file
    """
    # pylint: disable=too-many-branches
    del lib
    modifiers.ensure_only_supported("--config", "-f")
    if len(argv) > 2:
        raise CmdLineInputError()

    filename = None
    scope = None
    for arg in argv:
        if "=" not in arg:
            # a bare argument is the output file name
            filename = arg
            continue
        arg_name, arg_value = arg.split("=", 1)
        # scope=<value> is the only key=value argument accepted, and it
        # cannot be combined with --config
        if arg_name != "scope" or modifiers.get("--config"):
            raise CmdLineInputError()
        if not utils.is_valid_cib_scope(arg_value):
            utils.err("invalid CIB scope '%s'" % arg_value)
        else:
            scope = arg_value
    if modifiers.get("--config"):
        scope = "configuration"

    if not filename:
        print(utils.get_cib(scope).rstrip())
        return
    output = utils.get_cib(scope)
    if not output:
        utils.err("No data in the CIB")
    try:
        with open(filename, "w") as cib_file:
            cib_file.write(output)
    except OSError as e:
        utils.err(
            "Unable to write to file '%s', %s" % (filename, e.strerror)
        )
1044
1045
class RemoteAddNodes(RunRemotelyBase):
    """
    Ask a remote cluster node to add new nodes to its cluster.

    Sends a single "remote/cluster_add_nodes" request to the target node and
    relays the reports contained in the response to the local report
    processor.
    """

    def __init__(self, report_processor, target, data):
        # target -- the node the request is sent to
        # data -- payload, serialized to JSON and sent as 'data_json'
        super().__init__(report_processor)
        self._target = target
        self._data = data
        # set to True once the remote command reports status "success"
        self._success = False

    def get_initial_request_list(self):
        # a single request carrying the whole payload as JSON
        return [
            Request(
                self._target,
                RequestData(
                    "remote/cluster_add_nodes",
                    [("data_json", json.dumps(self._data))],
                ),
            )
        ]

    def _process_response(self, response):
        """Relay reports from the response; record remote success."""
        node_label = response.request.target.label
        report_item = self._get_response_report(response)
        if report_item is not None:
            # communication-level failure - no response data to process
            self._report(report_item)
            return

        try:
            output = json.loads(response.data)
            # forward every report produced by the remote command
            for report_dict in output["report_list"]:
                self._report(
                    reports.ReportItem(
                        severity=reports.ReportItemSeverity(
                            report_dict["severity"],
                            report_dict["forceable"],
                        ),
                        message=reports.messages.LegacyCommonMessage(
                            report_dict["code"],
                            report_dict["info"],
                            report_dict["report_text"],
                        ),
                    )
                )
            if output["status"] == "success":
                self._success = True
            elif output["status"] != "error":
                # NOTE(review): status_msg is printed only for statuses other
                # than "success" and "error" - presumably "error" responses
                # carry their details in report_list already, but confirm this
                # condition was not meant to be == "error".
                print_to_stderr("Error: {}".format(output["status_msg"]))

        except (KeyError, json.JSONDecodeError):
            # the response was not in the expected JSON shape
            self._report(
                reports.ReportItem.warning(
                    reports.messages.InvalidResponseFormat(node_label)
                )
            )

    def on_complete(self):
        # True when the remote command reported success
        return self._success
1101
1102
def node_add_outside_cluster(
    lib: Any, argv: Argv, modifiers: InputModifiers
) -> None:
    """
    Add new node(s) to a cluster this machine is not a member of, by asking
    one of the cluster's nodes to perform the addition.

    Options:
      * --wait - wait until new node will start up, effective only when --start
        is specified
      * --start - start new node
      * --enable - enable new node
      * --force - treat validation issues and not resolvable addresses as
        warnings instead of errors
      * --skip-offline - skip unreachable nodes
      * --no-watchdog-validation - do not validate watchdogs
      * --request-timeout - HTTP request timeout
    """
    del lib
    modifiers.ensure_only_supported(
        "--wait",
        "--start",
        "--enable",
        "--force",
        "--skip-offline",
        "--no-watchdog-validation",
        "--request-timeout",
    )
    if len(argv) < 2:
        raise CmdLineInputError(
            "Usage: pcs cluster node add-outside <cluster node> <node name> "
            "[addr=<node address>]... [watchdog=<watchdog path>] "
            "[device=<SBD device path>]... [--start [--wait[=<n>]]] [--enable] "
            "[--no-watchdog-validation]"
        )

    # first positional argument is the cluster member we talk to, the rest
    # describes the node being added
    cluster_member, *node_args = argv
    new_node = _parse_add_node(node_args)

    force_flags = []
    if modifiers.get("--force"):
        force_flags.append(reports.codes.FORCE)
    if modifiers.get("--skip-offline"):
        force_flags.append(reports.codes.SKIP_OFFLINE_NODES)

    command_data = {
        "nodes": [new_node],
        "wait": modifiers.get("--wait"),
        "start": modifiers.get("--start"),
        "enable": modifiers.get("--enable"),
        "no_watchdog_validation": modifiers.get("--no-watchdog-validation"),
        "force_flags": force_flags,
    }

    env = utils.get_lib_env()
    processor = env.report_processor
    target_reports, targets = (
        env.get_node_target_factory().get_target_list_with_reports(
            [cluster_member],
            skip_non_existing=False,
            allow_skip=False,
        )
    )
    processor.report_list(target_reports)
    if processor.has_errors:
        raise LibraryError()

    command = RemoteAddNodes(processor, targets[0], command_data)
    if not run_com_cmd(env.get_node_communicator(), command):
        raise LibraryError()
1170
1171
def node_remove(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Remove the given nodes from the cluster.

    Options:
      * --force - continue even though the action may cause quorum loss
      * --skip-offline - skip unreachable nodes
      * --request-timeout - HTTP request timeout
    """
    modifiers.ensure_only_supported(
        "--force",
        "--skip-offline",
        "--request-timeout",
    )
    if not argv:
        raise CmdLineInputError()

    force_flags = []
    if modifiers.get("--force"):
        force_flags += [reports.codes.FORCE]
    if modifiers.get("--skip-offline"):
        force_flags += [reports.codes.SKIP_OFFLINE_NODES]

    lib.cluster.remove_nodes(argv, force_flags=force_flags)
1194
1195
def cluster_uidgid(  # noqa: PLR0912
    lib: Any, argv: Argv, modifiers: InputModifiers, silent_list: bool = False
) -> None:
    """
    List, add or remove corosync uidgid files.

    Options: no options
    """
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-locals
    del lib
    modifiers.ensure_only_supported()

    if not argv:
        # no arguments: list configured uidgids
        listing: list[str] = []
        for config_file in os.listdir(settings.corosync_uidgid_dir):
            parsed = utils.read_uid_gid_file(config_file)
            if "uid" not in parsed and "gid" not in parsed:
                continue
            listing.append(
                "UID/GID: uid={} gid={}".format(
                    parsed.get("uid", ""), parsed.get("gid", "")
                )
            )
        if listing:
            print("\n".join(sorted(listing)))
        elif not silent_list:
            print_to_stderr("No uidgids configured")
        return

    command = argv.pop(0)
    uid = ""
    gid = ""

    if command in {"add", "delete", "remove"} and argv:
        # parse uid=<uid> / gid=<gid> pairs
        for option in argv:
            if "=" not in option:
                utils.err(
                    "uidgid options must be of the form uid=<uid> gid=<gid>"
                )
            name, value = option.split("=", 1)
            if name not in {"uid", "gid"}:
                utils.err(
                    "%s is not a valid key, you must use uid or gid" % name
                )
            if name == "uid":
                uid = value
            if name == "gid":
                gid = value
        if not uid and not gid:
            utils.err("you must set either uid or gid")

    if command == "add":
        utils.write_uid_gid_file(uid, gid)
    elif command in {"delete", "remove"}:
        if not utils.remove_uid_gid_file(uid, gid):
            utils.err(
                "no uidgid files with uid=%s and gid=%s found" % (uid, gid)
            )
    else:
        raise CmdLineInputError()
1260
1261
def cluster_get_corosync_conf(
    lib: Any, argv: Argv, modifiers: InputModifiers
) -> None:
    """
    Print corosync.conf, either the local one or the one from a given node.

    Options:
      * --request-timeout - timeout for HTTP requests, effective only when at
        least one node has been specified
    """
    del lib
    modifiers.ensure_only_supported("--request-timeout")
    if len(argv) > 1:
        raise CmdLineInputError()

    if argv:
        # fetch the config from the specified node over HTTP
        retval, conf_text = utils.getCorosyncConfig(argv[0])
        if retval != 0:
            utils.err(conf_text)
        else:
            print(conf_text.rstrip())
    else:
        # no node given: read the local config
        print(utils.getCorosyncConf().rstrip())
1285
1286
def cluster_reload(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Reload the corosync configuration on the local node.

    Options: no options
    """
    del lib
    modifiers.ensure_only_supported()
    # only 'pcs cluster reload corosync' is supported
    if argv != ["corosync"]:
        raise CmdLineInputError()

    output, retval = utils.reloadCorosync()
    if retval != 0 or "invalid option" in output:
        utils.err(output.rstrip())
    print_to_stderr("Corosync reloaded")
1300
1301
# Completely tear down the cluster & remove config files
# Code taken from cluster-clean script in pacemaker
def cluster_destroy(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:  # noqa: PLR0912
    """
    Permanently destroy the cluster on the local node (or, with --all, on
    every cluster node), removing all cluster state and configuration.

    Options:
        * --all - destroy cluster on all cluster nodes => destroy whole cluster
        * --force - required for destroying the cluster - DEPRECATED
        * --request-timeout - timeout of HTTP requests, effective only with --all
        * --yes - required for destroying the cluster
    """
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-statements
    del lib
    modifiers.ensure_only_supported(
        "--all", "--force", "--request-timeout", "--yes"
    )
    if argv:
        raise CmdLineInputError()
    if utils.is_run_interactive():
        warn(
            "It is recommended to run 'pcs cluster stop' before "
            "destroying the cluster."
        )
    # destructive command - require explicit confirmation (--yes / --force or
    # an interactive answer)
    if not utils.get_continue_confirmation(
        "This would kill all cluster processes and then PERMANENTLY remove "
        "cluster state and configuration",
        bool(modifiers.get("--yes")),
        bool(modifiers.get("--force")),
    ):
        return
    if modifiers.get("--all"):
        # --all: deconfigure remote/guest nodes first, then destroy the
        # cluster on every corosync node
        # load data
        cib = None
        lib_env = utils.get_lib_env()
        try:
            cib = lib_env.get_cib()
        except LibraryError:
            # best effort: without the CIB we cannot find remote/guest nodes
            warn(
                "Unable to load CIB to get guest and remote nodes from it, "
                "those nodes will not be deconfigured."
            )
        corosync_nodes, report_list = get_existing_nodes_names(
            utils.get_corosync_conf_facade()
        )
        if not corosync_nodes:
            report_list.append(
                reports.ReportItem.error(
                    reports.messages.CorosyncConfigNoNodesDefined()
                )
            )
        if report_list:
            process_library_reports(report_list)

        # destroy remote and guest nodes
        if cib is not None:
            try:
                all_remote_nodes, report_list = get_existing_nodes_names(
                    cib=cib
                )
                if report_list:
                    process_library_reports(report_list)
                if all_remote_nodes:
                    # best effort: skip offline nodes, tolerate failures
                    _destroy_pcmk_remote_env(
                        lib_env,
                        all_remote_nodes,
                        skip_offline_nodes=True,
                        allow_fails=True,
                    )
            except LibraryError as e:
                process_library_reports(list(e.args))

        # destroy full-stack nodes
        destroy_cluster(corosync_nodes)
    else:
        # local-only teardown; the order matters: stop services, kill
        # leftovers, disable services, then remove configuration and state
        print_to_stderr("Shutting down pacemaker/corosync services...")
        for service in ["pacemaker", "corosync-qdevice", "corosync"]:
            # It is safe to ignore error since we want it not to be running
            # anyways.
            with contextlib.suppress(LibraryError):
                utils.stop_service(service)
        print_to_stderr("Killing any remaining services...")
        kill_local_cluster_services()
        # previously errors were suppressed in here, let's keep it that way
        # for now
        with contextlib.suppress(Exception):
            utils.disableServices()

        # it's not a big deal if sbd disable fails
        with contextlib.suppress(Exception):
            service_manager = utils.get_service_manager()
            service_manager.disable(
                lib_sbd.get_sbd_service_name(service_manager)
            )

        print_to_stderr("Removing all cluster configuration files...")
        dummy_output, dummy_retval = utils.run(
            [
                settings.rm_exec,
                "-f",
                settings.corosync_conf_file,
                settings.corosync_authkey_file,
                settings.pacemaker_authkey_file,
                settings.pcsd_dr_config_location,
            ]
        )
        # remove pacemaker state files (CIB backups, policy engine inputs,
        # core dumps, ...) from the local state dir
        state_files = [
            "cib-*",
            "cib.*",
            "cib.xml*",
            "core.*",
            "cts.*",
            "hostcache",
            "pe*.bz2",
        ]
        for name in state_files:
            dummy_output, dummy_retval = utils.run(
                [
                    settings.find_exec,
                    settings.pacemaker_local_state_dir,
                    "-name",
                    name,
                    "-exec",
                    settings.rm_exec,
                    "-f",
                    "{}",
                    ";",
                ]
            )
        # errors from deleting other files are suppressed as well we do not
        # want to fail if qdevice was not set up
        with contextlib.suppress(Exception):
            qdevice_net.client_destroy()
1434
1435
def cluster_verify(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Check the cluster configuration (CIB) for errors.

    Options:
      * -f - CIB file
      * --full - more verbose output
    """
    modifiers.ensure_only_supported("-f", "--full")
    if argv:
        raise CmdLineInputError()

    verbose = modifiers.get("--full")
    lib.cluster.verify(verbose=verbose)
1447
1448
def cluster_report(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:  # noqa: PLR0912
    """
    Create a tarball with cluster diagnostics via crm_report.

    Options:
      * --force - allow overwriting existing files - DEPRECATED
      * --from - timestamp
      * --to - timestamp
      * --overwrite - allow overwriting existing files
    The resulting file should be stored on the machine where pcs cli is
    running, not on the machine where pcs daemon is running. Therefore we
    want to use --overwrite and not --force.
    """

    # pylint: disable=too-many-branches
    del lib
    modifiers.ensure_only_supported("--force", "--from", "--overwrite", "--to")
    if len(argv) != 1:
        raise CmdLineInputError()

    outfile = argv[0]
    tarball = outfile + ".tar.bz2"
    if os.path.exists(tarball):
        if not (modifiers.get("--overwrite") or modifiers.get("--force")):
            utils.err(
                tarball + " already exists, use --overwrite to overwrite"
            )
            return
        if modifiers.get("--force"):
            # deprecated in the first pcs-0.12 version, replaced by --overwrite
            deprecation_warning(
                "Using --force to confirm this action is deprecated and might "
                "be removed in a future release, use --overwrite instead"
            )
        try:
            os.remove(tarball)
        except OSError as e:
            utils.err(f"Unable to remove {tarball}: {format_os_error(e)}")

    # build crm_report options: -f <from> [-t <to>]; default "from" is
    # 24 hours ago
    report_args = ["-f"]
    if modifiers.is_specified("--from"):
        report_args.append(str(modifiers.get("--from")))
        if modifiers.is_specified("--to"):
            report_args.append("-t")
            report_args.append(str(modifiers.get("--to")))
    else:
        yesterday = datetime.datetime.now() - datetime.timedelta(1)
        report_args.append(yesterday.strftime("%Y-%m-%d %H:%M"))

    report_args.append(outfile)
    output, retval = utils.run([settings.crm_report_exec] + report_args)
    if retval != 0 and (
        "ERROR: Cannot determine nodes; specify --nodes or --single-node"
        in output
    ):
        utils.err("cluster is not configured on this node")

    # drop crm_report noise lines before showing its output to the user
    noise_markers = (
        "We will attempt to remove",
        "-p option",
        "However, doing",
        "to diagnose",
    )
    kept_lines = []
    for line in output.split("\n"):
        if line.startswith(("cat:", "grep", "tail")):
            continue
        if any(marker in line for marker in noise_markers):
            continue
        kept_lines.append(line.replace("--dest", "<dest>"))
    filtered = "".join(one_line + "\n" for one_line in kept_lines)
    if retval != 0:
        utils.err(filtered)
    print_to_stderr(filtered)
1523
1524
1525 # TODO this should be implemented in multiple lib commands, and the cli should
1526 # only call these commands as needed
1527 # - lib command for checking auth, that returns not authorized nodes
1528 # - if any not authorized nodes
# - the cli asks for a username and password
1530 # - call lib command for authorizing hosts
1531 # - else:
1532 # - call lib command to send the configs to other nodes
1533 #
1534 # This command itself is always run as root, see app.py (_non_root_run)
1535 # So we do not need to deal with the configs in .pcs for non-root run
def cluster_auth_cmd(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:  # noqa: PLR0912
    """
    Authenticate the local host against all nodes of the local cluster; if
    all nodes are already authorized, push local synced configs to them
    instead.

    Options:
      * --corosync_conf - corosync.conf file
      * --request-timeout - timeout of HTTP requests
      * -u - username
      * -p - password
    """
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-locals
    modifiers.ensure_only_supported(
        "--corosync_conf", "--request-timeout", "-u", "-p"
    )
    if argv:
        raise CmdLineInputError()
    lib_env = utils.get_lib_env()
    target_factory = lib_env.get_node_target_factory()
    corosync_conf = lib_env.get_corosync_conf()
    cluster_node_list = corosync_conf.get_nodes()
    # only nodes with a name in corosync.conf can be authenticated here
    cluster_node_names = []
    missing_name = False
    for node in cluster_node_list:
        if node.name:
            cluster_node_names.append(node.name)
        else:
            missing_name = True
    if missing_name:
        warn(
            "Skipping nodes which do not have their name defined in "
            "corosync.conf, use the 'pcs host auth' command to authenticate "
            "them"
        )
    # a node missing from known-hosts (HostNotFound) is not authorized yet
    target_list = []
    not_authorized_node_name_list = []
    for node_name in cluster_node_names:
        try:
            target_list.append(target_factory.get_target(node_name))
        except HostNotFound:
            print_to_stderr("{}: Not authorized".format(node_name))
            not_authorized_node_name_list.append(node_name)
    # ask the known nodes whether our auth token is actually accepted
    com_cmd = CheckAuth(lib_env.report_processor)
    com_cmd.set_targets(target_list)
    not_authorized_node_name_list.extend(
        run_and_raise(lib_env.get_node_communicator(), com_cmd)
    )
    if not_authorized_node_name_list:
        print(
            "Nodes to authorize: {}".format(
                ", ".join(not_authorized_node_name_list)
            )
        )
        username, password = utils.get_user_and_pass()
        # only nodes with at least one address in corosync.conf can be
        # authenticated; the first address is used as the destination
        not_auth_node_list = []
        for node_name in not_authorized_node_name_list:
            for node in cluster_node_list:
                if node.name == node_name:
                    if node.addrs_plain():
                        not_auth_node_list.append(node)
                    else:
                        print_to_stderr(
                            f"{node.name}: No addresses defined in "
                            "corosync.conf, use the 'pcs host auth' command to "
                            "authenticate the node"
                        )
        nodes_to_auth_data = {
            node.name: HostAuthData(
                username,
                password,
                [
                    Destination(
                        node.addrs_plain()[0], settings.pcsd_default_port
                    )
                ],
            )
            for node in not_auth_node_list
        }
        lib.auth.auth_hosts(nodes_to_auth_data)
    else:
        # TODO backwards compatibility
        # The command overwrites known-hosts and pcsd_settings.conf on all
        # cluster nodes with local version, only if all of the nodes are
        # already authorized. We should investigate what is the reason why
        # the command does this, and decide if we should drop/keep/change this
        configs = {}
        for file_type_code in SYNCED_CONFIGS:
            file_instance = FileInstance.for_common(file_type_code)
            if not file_instance.raw_file.exists():
                # it's not an error if the file does not exist locally, we just
                # won't send it
                continue
            try:
                configs[file_type_code] = file_instance.read_raw().decode(
                    "utf-8"
                )
            except RawFileError as e:
                # in case of error when reading some file, we still might be able
                # to read and send the others without issues
                lib_env.report_processor.report(
                    raw_file_error_report(e, is_forced_or_warning=True)
                )
        set_configs_cmd = SetConfigs(
            lib_env.report_processor,
            corosync_conf.get_cluster_name(),
            configs,
            force=True,
            rejection_severity=reports.ReportItemSeverity.error(),
        )
        set_configs_cmd.set_targets(target_list)
        run_and_raise(lib_env.get_node_communicator(), set_configs_cmd)
1645
1646
def _parse_node_options(
    node: str,
    options: Argv,
    additional_options: StringCollection = (),
    additional_repeatable_options: StringCollection = (),
) -> dict[str, Union[str, list[str]]]:
    """
    Parse key=value options of one node into a dict with the node's name.

    Commandline options: no options
    """
    addr_keyword = "addr"
    allowed = {addr_keyword} | set(additional_options)
    repeatable = {addr_keyword} | set(additional_repeatable_options)
    parser = KeyValueParser(options, repeatable)
    unique_values = parser.get_unique()
    repeatable_values = parser.get_repeatable()
    unexpected = (
        set(unique_values.keys()) | set(repeatable_values)
    ) - allowed
    if unexpected:
        raise CmdLineInputError(
            f"Unknown options {format_list(unexpected)} for node '{node}'"
        )
    unique_values["name"] = node
    # the library expects the repeated 'addr' option under the key 'addrs'
    if addr_keyword in repeatable_values:
        repeatable_values["addrs"] = repeatable_values.pop(addr_keyword)
    return unique_values | repeatable_values
1674
1675
# Keywords splitting 'pcs cluster setup' arguments into sections; used by
# cluster_setup and _parse_transport.
TRANSPORT_KEYWORD = "transport"
# Section holding transport options given before any explicit sub-keyword.
TRANSPORT_DEFAULT_SECTION = "__default__"
LINK_KEYWORD = "link"
1679
1680
def _parse_transport(
    transport_args: Argv,
) -> tuple[str, dict[str, Union[dict[str, str], list[dict[str, str]]]]]:
    """
    Parse 'transport <type> [options] [link ...] [compression ...]
    [crypto ...]' arguments into a transport type and per-section options.

    Commandline options: no options
    """
    if not transport_args:
        raise CmdLineInputError(
            f"{TRANSPORT_KEYWORD.capitalize()} type not defined"
        )
    transport_type = transport_args[0]

    section_keywords = {"compression", "crypto", LINK_KEYWORD}
    grouped = parse_args.group_by_keywords(
        transport_args[1:],
        section_keywords,
        implicit_first_keyword=TRANSPORT_DEFAULT_SECTION,
    )
    options: dict[str, Union[dict[str, str], list[dict[str, str]]]] = {}
    for section in section_keywords | {TRANSPORT_DEFAULT_SECTION}:
        # links are handled separately below since they may repeat
        if section == LINK_KEYWORD:
            continue
        options[section] = KeyValueParser(
            grouped.get_args_flat(section)
        ).get_unique()
    options[LINK_KEYWORD] = [
        KeyValueParser(one_link).get_unique()
        for one_link in grouped.get_args_groups(LINK_KEYWORD)
    ]

    return transport_type, options
1712
1713
def cluster_setup(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Create a new cluster, either live on the listed nodes or, with
    --corosync_conf, only as a local corosync.conf file.

    Options:
      * --wait - only effective when used with --start
      * --start - start cluster
      * --enable - enable cluster
      * --force - some validation issues and unresolvable addresses are treated
        as warnings
      * --no-keys-sync - do not create and distribute pcsd ssl cert and key,
        corosync and pacemaker authkeys
      * --no-cluster-uuid - do not generate a cluster UUID during setup
      * --corosync_conf - corosync.conf file path, do not talk to cluster nodes
      * --overwrite - allow overwriting existing files
    """
    # pylint: disable=too-many-locals
    # --corosync_conf switches the command to "local" mode: only produce a
    # config file, do not contact any nodes
    is_local = modifiers.is_specified("--corosync_conf")

    allowed_options_common = ["--force", "--no-cluster-uuid"]
    allowed_options_live = [
        "--wait",
        "--start",
        "--enable",
        "--no-keys-sync",
    ]
    allowed_options_local = ["--corosync_conf", "--overwrite"]
    modifiers.ensure_only_supported(
        *(
            allowed_options_common
            + allowed_options_live
            + allowed_options_local
        ),
    )
    # live-only and local-only options are mutually exclusive
    if is_local and modifiers.is_specified_any(allowed_options_live):
        raise CmdLineInputError(
            f"Cannot specify any of {format_list(allowed_options_live)} "
            "when '--corosync_conf' is specified"
        )
    if not is_local and modifiers.is_specified("--overwrite"):
        raise CmdLineInputError(
            "Cannot specify '--overwrite' when '--corosync_conf' is not "
            "specified"
        )

    # at least a cluster name and one node are required
    if len(argv) < 2:
        raise CmdLineInputError()
    cluster_name, *argv = argv
    # split the arguments into node definitions and the transport / totem /
    # quorum sections
    keywords = [TRANSPORT_KEYWORD, "totem", "quorum"]
    parsed_args = parse_args.group_by_keywords(
        argv, keywords, implicit_first_keyword="nodes"
    )
    parsed_args.ensure_unique_keywords()
    nodes = [
        _parse_node_options(node, options)
        for node, options in parse_args.split_list_by_any_keywords(
            parsed_args.get_args_flat("nodes"), "node name"
        ).items()
    ]

    transport_type = None
    transport_options: dict[
        str, Union[dict[str, str], list[dict[str, str]]]
    ] = {}

    if parsed_args.has_keyword(TRANSPORT_KEYWORD):
        transport_type, transport_options = _parse_transport(
            parsed_args.get_args_flat(TRANSPORT_KEYWORD)
        )

    force_flags = []
    if modifiers.get("--force"):
        force_flags.append(reports.codes.FORCE)

    totem_options = KeyValueParser(
        parsed_args.get_args_flat("totem")
    ).get_unique()
    quorum_options = KeyValueParser(
        parsed_args.get_args_flat("quorum")
    ).get_unique()

    if not is_local:
        # live setup: the library talks to the nodes and distributes configs
        lib.cluster.setup(
            cluster_name,
            nodes,
            transport_type=transport_type,
            transport_options=transport_options.get(
                TRANSPORT_DEFAULT_SECTION, {}
            ),
            link_list=transport_options.get(LINK_KEYWORD, []),
            compression_options=transport_options.get("compression", {}),
            crypto_options=transport_options.get("crypto", {}),
            totem_options=totem_options,
            quorum_options=quorum_options,
            wait=modifiers.get("--wait"),
            start=modifiers.get("--start"),
            enable=modifiers.get("--enable"),
            no_keys_sync=modifiers.get("--no-keys-sync"),
            no_cluster_uuid=modifiers.is_specified("--no-cluster-uuid"),
            force_flags=force_flags,
        )
        return

    # local setup: generate corosync.conf content and write it to the
    # requested path
    corosync_conf_data = lib.cluster.setup_local(
        cluster_name,
        nodes,
        transport_type=transport_type,
        transport_options=transport_options.get(TRANSPORT_DEFAULT_SECTION, {}),
        link_list=transport_options.get(LINK_KEYWORD, []),
        compression_options=transport_options.get("compression", {}),
        crypto_options=transport_options.get("crypto", {}),
        totem_options=totem_options,
        quorum_options=quorum_options,
        no_cluster_uuid=modifiers.is_specified("--no-cluster-uuid"),
        force_flags=force_flags,
    )

    corosync_conf_file = pcs_file.RawFile(
        file_metadata.for_file_type(
            file_type_codes.COROSYNC_CONF, modifiers.get("--corosync_conf")
        )
    )
    overwrite = modifiers.is_specified("--overwrite")
    try:
        corosync_conf_file.write(corosync_conf_data, can_overwrite=overwrite)
    except pcs_file.FileAlreadyExists as e:
        utils.err(
            reports.messages.FileAlreadyExists(
                e.metadata.file_type_code,
                e.metadata.path,
            ).message
            + ", use --overwrite to overwrite existing file(s)"
        )
    except pcs_file.RawFileError as e:
        utils.err(
            reports.messages.FileIoError(
                e.metadata.file_type_code,
                e.action,
                e.reason,
                file_path=e.metadata.path,
            ).message
        )
1854
1855
def config_update(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Update transport / compression / crypto / totem options of an existing
    cluster, live or in a local corosync.conf file.

    Options:
      * --corosync_conf - corosync.conf file path, do not talk to cluster nodes
    """
    modifiers.ensure_only_supported("--corosync_conf")
    section_names = ["transport", "compression", "crypto", "totem"]
    grouped = parse_args.group_by_keywords(argv, section_names)
    # one key=value dict per section
    sections = {
        name: KeyValueParser(grouped.get_args_flat(name)).get_unique()
        for name in section_names
    }

    if modifiers.is_specified("--corosync_conf"):
        # local mode: rewrite the given file instead of contacting nodes
        _corosync_conf_local_cmd_call(
            modifiers.get("--corosync_conf"),
            lambda corosync_conf_content: lib.cluster.config_update_local(
                corosync_conf_content,
                sections["transport"],
                sections["compression"],
                sections["crypto"],
                sections["totem"],
            ),
        )
        return

    lib.cluster.config_update(
        sections["transport"],
        sections["compression"],
        sections["crypto"],
        sections["totem"],
    )
1899
1900
def _format_options(label: str, options: Mapping[str, str]) -> list[str]:
    """Render a labeled, indented option listing; empty list if no options."""
    if not options:
        return []
    rendered = [f"{opt}: {val}" for opt, val in sorted(options.items())]
    return [f"{label}:"] + indent(rendered)
1909
1910
def _format_nodes(nodes: Iterable[CorosyncNodeDto]) -> list[str]:
    """Render a 'Nodes:' section, nodes sorted by name, addresses by link."""
    lines = ["Nodes:"]
    for one_node in sorted(nodes, key=lambda item: item.name):
        attrs = [
            f"Link {addr.link} address: {addr.addr}"
            for addr in sorted(one_node.addrs, key=lambda addr: addr.link)
        ]
        attrs.append(f"nodeid: {one_node.nodeid}")
        lines.extend(indent([f"{one_node.name}:"] + indent(attrs)))
    return lines
1920
1921
def config_show(
    lib: Any, argv: Argv, modifiers: parse_args.InputModifiers
) -> None:
    """
    Print the cluster configuration in the requested output format.

    Options:
      * --corosync_conf - corosync.conf file path, do not talk to cluster nodes
      * --output-format - supported formats: text, cmd, json
    """
    modifiers.ensure_only_supported(
        "--corosync_conf", output_format_supported=True
    )
    if argv:
        raise CmdLineInputError()
    output_format = modifiers.get_output_format()
    corosync_conf_dto = lib.cluster.get_corosync_conf_struct()
    if output_format == OUTPUT_FORMAT_VALUE_JSON:
        print(json.dumps(dto.to_dict(corosync_conf_dto)))
        return
    if output_format == OUTPUT_FORMAT_VALUE_CMD:
        if corosync_conf_dto.quorum_device is not None:
            warn(
                "Quorum device configuration detected but not yet supported by "
                "this command."
            )
        print(" \\\n".join(_config_get_cmd(corosync_conf_dto)))
        return
    print("\n".join(_config_get_text(corosync_conf_dto)))
1949
1950
def _config_get_text(corosync_conf: CorosyncConfDto) -> list[str]:
    """Build the plain-text rendering of a corosync configuration DTO."""
    lines = [f"Cluster Name: {corosync_conf.cluster_name}"]
    if corosync_conf.cluster_uuid:
        lines.append(f"Cluster UUID: {corosync_conf.cluster_uuid}")
    lines.append(f"Transport: {corosync_conf.transport.lower()}")
    lines.extend(_format_nodes(corosync_conf.nodes))
    if corosync_conf.links_options:
        lines.append("Links:")
        for linknum, link_options in sorted(
            corosync_conf.links_options.items()
        ):
            lines.extend(
                indent(_format_options(f"Link {linknum}", link_options))
            )

    # top-level option sections, in display order
    for label, options in (
        ("Transport Options", corosync_conf.transport_options),
        ("Compression Options", corosync_conf.compression_options),
        ("Crypto Options", corosync_conf.crypto_options),
        ("Totem Options", corosync_conf.totem_options),
        ("Quorum Options", corosync_conf.quorum_options),
    ):
        lines.extend(_format_options(label, options))

    if corosync_conf.quorum_device:
        device = corosync_conf.quorum_device
        lines.append(f"Quorum Device: {device.model}")
        for label, options in (
            ("Options", device.generic_options),
            ("Model Options", device.model_options),
            ("Heuristics", device.heuristics_options),
        ):
            lines.extend(indent(_format_options(label, options)))
    return lines
2007
2008
def _corosync_node_to_cmd_line(node: CorosyncNodeDto) -> str:
    """Render one node as 'name addr=... addr=...', addresses by link."""
    addr_args = [
        f"addr={addr.addr}"
        for addr in sorted(node.addrs, key=lambda addr: addr.link)
    ]
    return " ".join([node.name] + addr_args)
2017
2018
def _section_to_lines(
    options: Mapping[str, str], keyword: Optional[str] = None
) -> list[str]:
    """Render options as indented key=value lines under an optional keyword."""
    if not options:
        return indent([])
    lines: list[str] = []
    if keyword:
        lines.append(keyword)
    lines.extend(
        indent([f"{key}={val}" for key, val in sorted(options.items())])
    )
    return indent(lines)
2030
2031
def _config_get_cmd(corosync_conf: CorosyncConfDto) -> list[str]:
    """Build 'pcs cluster setup' command lines recreating this config."""
    lines = [f"pcs cluster setup {corosync_conf.cluster_name}"]
    lines += indent(
        [
            _corosync_node_to_cmd_line(one_node)
            for one_node in sorted(
                corosync_conf.nodes, key=lambda item: item.nodeid
            )
        ]
    )
    # 'transport' section with its sub-sections (links, compression, crypto)
    transport_lines = [
        "transport",
        str(corosync_conf.transport.value).lower(),
    ]
    transport_lines += _section_to_lines(corosync_conf.transport_options)
    for _unused, link_options in sorted(corosync_conf.links_options.items()):
        transport_lines += _section_to_lines(link_options, "link")
    transport_lines += _section_to_lines(
        corosync_conf.compression_options, "compression"
    )
    transport_lines += _section_to_lines(
        corosync_conf.crypto_options, "crypto"
    )
    lines += indent(transport_lines)
    lines += _section_to_lines(corosync_conf.totem_options, "totem")
    lines += _section_to_lines(corosync_conf.quorum_options, "quorum")
    if not corosync_conf.cluster_uuid:
        lines += indent(["--no-cluster-uuid"])
    return lines
2058
2059
def _parse_add_node(argv: Argv) -> dict[str, Union[str, list[str]]]:
    """Parse '<hostname> [addr=...] [watchdog=...] [device=...]...' args."""
    device_keyword = "device"
    watchdog_keyword = "watchdog"
    hostname = argv[0]
    parsed = _parse_node_options(
        hostname,
        argv[1:],
        additional_options={device_keyword, watchdog_keyword},
        additional_repeatable_options={device_keyword},
    )
    # the library expects repeated 'device' options under the key 'devices'
    if device_keyword in parsed:
        parsed[f"{device_keyword}s"] = parsed.pop(device_keyword)
    return parsed
2074
2075
def node_add(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Add a new node to the local cluster.

    Options:
      * --wait - wait until new node will start up, effective only when --start
        is specified
      * --start - start new node
      * --enable - enable new node
      * --force - treat validation issues and not resolvable addresses as
        warnings instead of errors
      * --skip-offline - skip unreachable nodes
      * --no-watchdog-validation - do not validate watchdogs
      * --request-timeout - HTTP request timeout
    """
    modifiers.ensure_only_supported(
        "--wait",
        "--start",
        "--enable",
        "--force",
        "--skip-offline",
        "--no-watchdog-validation",
        "--request-timeout",
    )
    if not argv:
        raise CmdLineInputError()

    new_node = _parse_add_node(argv)

    force_flags = []
    if modifiers.get("--force"):
        force_flags += [reports.codes.FORCE]
    if modifiers.get("--skip-offline"):
        force_flags += [reports.codes.SKIP_OFFLINE_NODES]

    lib.cluster.add_nodes(
        nodes=[new_node],
        wait=modifiers.get("--wait"),
        start=modifiers.get("--start"),
        enable=modifiers.get("--enable"),
        no_watchdog_validation=modifiers.get("--no-watchdog-validation"),
        force_flags=force_flags,
    )
2117
2118
def remove_nodes_from_cib(
    lib: Any, argv: Argv, modifiers: InputModifiers
) -> None:
    """
    Remove the specified nodes from the CIB.

    Options: no options
    """
    modifiers.ensure_only_supported()
    if argv:
        lib.cluster.remove_nodes_from_cib(argv)
    else:
        raise CmdLineInputError("No nodes specified")
2129
2130
def link_add(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Add a corosync link to the cluster.

    Options:
      * --force - treat validation issues and not resolvable addresses as
        warnings instead of errors
      * --skip-offline - skip unreachable nodes
      * --request-timeout - HTTP request timeout
    """
    modifiers.ensure_only_supported(
        "--force", "--request-timeout", "--skip-offline"
    )
    if not argv:
        raise CmdLineInputError()

    force_flags = []
    for option, flag in (
        ("--force", reports.codes.FORCE),
        ("--skip-offline", reports.codes.SKIP_OFFLINE_NODES),
    ):
        if modifiers.get(option):
            force_flags.append(flag)

    # args before the "options" keyword are node addresses
    grouped = parse_args.group_by_keywords(
        argv, {"options"}, implicit_first_keyword="nodes"
    )
    grouped.ensure_unique_keywords()
    node_addrs = KeyValueParser(grouped.get_args_flat("nodes")).get_unique()
    link_options = KeyValueParser(grouped.get_args_flat("options")).get_unique()

    lib.cluster.add_link(
        node_addrs,
        link_options,
        force_flags=force_flags,
    )
2161
2162
def link_remove(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Remove the specified corosync links from the cluster.

    Options:
      * --skip-offline - skip unreachable nodes
      * --request-timeout - HTTP request timeout
    """
    modifiers.ensure_only_supported("--request-timeout", "--skip-offline")
    if not argv:
        raise CmdLineInputError()

    force_flags = (
        [reports.codes.SKIP_OFFLINE_NODES]
        if modifiers.get("--skip-offline")
        else []
    )
    lib.cluster.remove_links(argv, force_flags=force_flags)
2178
2179
def link_update(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Update an existing corosync link identified by its link number.

    Options:
      * --force - treat validation issues and not resolvable addresses as
        warnings instead of errors
      * --skip-offline - skip unreachable nodes
      * --request-timeout - HTTP request timeout
    """
    modifiers.ensure_only_supported(
        "--force", "--request-timeout", "--skip-offline"
    )
    if len(argv) < 2:
        raise CmdLineInputError()

    force_flags = []
    for option, flag in (
        ("--force", reports.codes.FORCE),
        ("--skip-offline", reports.codes.SKIP_OFFLINE_NODES),
    ):
        if modifiers.get(option):
            force_flags.append(flag)

    # first arg is the link number; the rest are node addrs / link options
    linknumber, *rest = argv
    grouped = parse_args.group_by_keywords(
        rest, {"options"}, implicit_first_keyword="nodes"
    )
    grouped.ensure_unique_keywords()
    node_addrs = KeyValueParser(grouped.get_args_flat("nodes")).get_unique()
    link_options = KeyValueParser(grouped.get_args_flat("options")).get_unique()

    lib.cluster.update_link(
        linknumber,
        node_addrs,
        link_options,
        force_flags=force_flags,
    )
2212
2213
def generate_uuid(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Generate a new cluster UUID, either cluster-wide or in a local file.

    Options:
      * --force - allow to rewrite an existing UUID in corosync.conf
      * --corosync_conf - corosync.conf file path, do not talk to cluster nodes
    """
    modifiers.ensure_only_supported("--force", "--corosync_conf")
    if argv:
        raise CmdLineInputError()

    force_flags = [reports.codes.FORCE] if modifiers.get("--force") else []

    if modifiers.is_specified("--corosync_conf"):
        # operate on the given file only, without contacting cluster nodes
        _corosync_conf_local_cmd_call(
            modifiers.get("--corosync_conf"),
            lambda conf_content: lib.cluster.generate_cluster_uuid_local(
                conf_content, force_flags=force_flags
            ),
        )
    else:
        lib.cluster.generate_cluster_uuid(force_flags=force_flags)
2238