1 # pylint: disable=too-many-lines
2 import contextlib
3 import datetime
4 import json
5 import math
6 import os
7 import subprocess
8 import sys
9 import tempfile
10 import time
11 import xml.dom.minidom
12 import xml.parsers.expat
13 from typing import Any, Callable, Iterable, Mapping, Optional, Union, cast
14
15 import pcs.lib.pacemaker.live as lib_pacemaker
16 from pcs import settings, utils
17 from pcs.cli.common import parse_args
18 from pcs.cli.common.errors import (
19 ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE,
20 CmdLineInputError,
21 raise_command_replaced,
22 )
23 from pcs.cli.common.parse_args import (
24 OUTPUT_FORMAT_VALUE_CMD,
25 OUTPUT_FORMAT_VALUE_JSON,
26 Argv,
27 InputModifiers,
28 KeyValueParser,
29 )
30 from pcs.cli.common.tools import print_to_stderr
31 from pcs.cli.file import metadata as file_metadata
32 from pcs.cli.reports import process_library_reports
33 from pcs.cli.reports.messages import report_item_msg_from_dto
34 from pcs.cli.reports.output import warn
35 from pcs.common import file as pcs_file
36 from pcs.common import file_type_codes, reports
37 from pcs.common.corosync_conf import CorosyncConfDto, CorosyncNodeDto
38 from pcs.common.interface import dto
39 from pcs.common.node_communicator import HostNotFound, Request, RequestData
40 from pcs.common.str_tools import format_list, indent, join_multilines
41 from pcs.common.tools import format_os_error
42 from pcs.common.types import StringCollection, StringIterable
43 from pcs.lib import sbd as lib_sbd
44 from pcs.lib.commands.remote_node import _destroy_pcmk_remote_env
45 from pcs.lib.communication.nodes import CheckAuth
46 from pcs.lib.communication.tools import RunRemotelyBase, run_and_raise
47 from pcs.lib.communication.tools import run as run_com_cmd
48 from pcs.lib.corosync import qdevice_net
49 from pcs.lib.corosync.live import QuorumStatusException, QuorumStatusFacade
50 from pcs.lib.errors import LibraryError
51 from pcs.lib.node import get_existing_nodes_names
52 from pcs.utils import parallel_for_nodes
53
54
def _corosync_conf_local_cmd_call(
    corosync_conf_path: parse_args.ModifierValueType,
    lib_cmd: Callable[[bytes], bytes],
) -> None:
    """
    Run a library command against a corosync.conf file given on the command
    line

    The command receives the current corosync.conf content as its first
    argument and returns the new content, which is written back to the file

    lib_cmd -- the lib command to be called
    """
    conf_file = pcs_file.RawFile(
        file_metadata.for_file_type(
            file_type_codes.COROSYNC_CONF, corosync_conf_path
        )
    )

    try:
        original_content = conf_file.read()
        conf_file.write(lib_cmd(original_content), can_overwrite=True)
    except pcs_file.RawFileError as e:
        # translate low-level file errors into a CLI error message
        raise CmdLineInputError(
            reports.messages.FileIoError(
                e.metadata.file_type_code,
                e.action,
                e.reason,
                file_path=e.metadata.path,
            ).message
        ) from e
90
91
def cluster_cib_upgrade_cmd(
    lib: Any, argv: Argv, modifiers: InputModifiers
) -> None:
    """
    Upgrade the CIB schema.

    Options:
    * -f - CIB file
    """
    del lib
    modifiers.ensure_only_supported("-f")
    # the command takes no positional arguments
    if len(argv) > 0:
        raise CmdLineInputError()
    utils.cluster_upgrade()
104
105
def cluster_disable_cmd(
    lib: Any, argv: Argv, modifiers: InputModifiers
) -> None:
    """
    Options:
    * --all - disable all cluster nodes
    * --request-timeout - timeout for HTTP requests - effective only when at
      least one node has been specified or --all has been used
    """
    del lib
    modifiers.ensure_only_supported("--all", "--request-timeout")
    if not modifiers.get("--all"):
        disable_cluster(argv)
        return
    # --all and an explicit node list cannot be combined
    if argv:
        utils.err(ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE)
    disable_cluster_all()
123
124
def cluster_enable_cmd(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Options:
    * --all - enable all cluster nodes
    * --request-timeout - timeout for HTTP requests - effective only when at
      least one node has been specified or --all has been used
    """
    del lib
    modifiers.ensure_only_supported("--all", "--request-timeout")
    if not modifiers.get("--all"):
        enable_cluster(argv)
        return
    # --all and an explicit node list cannot be combined
    if argv:
        utils.err(ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE)
    enable_cluster_all()
140
141
def cluster_stop_cmd(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Options:
    * --wait
    * --force - no error when possible quorum loss
    * --request-timeout - timeout for HTTP requests - effective only when at
      least one node has been specified
    * --pacemaker - stop pacemaker, only effective when no node has been
      specified
    * --corosync - stop corosync, only effective when no node has been
      specified
    * --all - stop all cluster nodes
    """
    del lib
    modifiers.ensure_only_supported(
        "--wait",
        "--request-timeout",
        "--pacemaker",
        "--corosync",
        "--all",
        "--force",
    )
    if not modifiers.get("--all"):
        stop_cluster(argv)
        return
    # --all and an explicit node list cannot be combined
    if argv:
        utils.err(ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE)
    stop_cluster_all()
169
170
def cluster_start_cmd(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Options:
    * --wait
    * --request-timeout - timeout for HTTP requests, have effect only if at
      least one node have been specified
    * --all - start all cluster nodes
    * --corosync_conf
    """
    del lib
    modifiers.ensure_only_supported(
        "--wait", "--request-timeout", "--all", "--corosync_conf"
    )
    if not modifiers.get("--all"):
        start_cluster(argv)
        return
    # --all and an explicit node list cannot be combined
    if argv:
        utils.err(ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE)
    start_cluster_all()
189
190
def authkey_corosync(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Change the corosync authkey, either to the one read from a file given as
    the only argument or to a newly generated one.

    Options:
    * --force - skip check for authkey length
    * --request-timeout - timeout for HTTP requests
    * --skip-offline - skip unreachable nodes
    """
    modifiers.ensure_only_supported(
        "--force", "--skip-offline", "--request-timeout"
    )
    if len(argv) > 1:
        raise CmdLineInputError()

    force_flags = []
    if modifiers.get("--force"):
        force_flags.append(reports.codes.FORCE)
    if modifiers.get("--skip-offline"):
        force_flags.append(reports.codes.SKIP_OFFLINE_NODES)

    # no file given -> None, the lib command generates a key itself
    new_authkey = None
    if argv:
        key_path = argv[0]
        try:
            with open(key_path, "rb") as key_file:
                new_authkey = key_file.read()
        except OSError as e:
            utils.err(f"Unable to read file '{key_path}': {format_os_error(e)}")
    lib.cluster.corosync_authkey_change(
        corosync_authkey=new_authkey,
        force_flags=force_flags,
    )
219
220
def sync_nodes(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Distribute the local corosync.conf to all cluster nodes.

    Options:
    * --request-timeout - timeout for HTTP requests
    """
    del lib
    modifiers.ensure_only_supported("--request-timeout")
    if argv:
        raise CmdLineInputError()

    conf_text = utils.getCorosyncConf()
    node_name_list, report_list = get_existing_nodes_names(
        utils.get_corosync_conf_facade(conf_text=conf_text)
    )
    if not node_name_list:
        report_list.append(
            reports.ReportItem.error(
                reports.messages.CorosyncConfigNoNodesDefined()
            )
        )
    if report_list:
        process_library_reports(report_list)

    for node_name in node_name_list:
        utils.setCorosyncConfig(node_name, conf_text)

    warn(
        "Corosync configuration has been synchronized, please reload corosync "
        "daemon using 'pcs cluster reload corosync' command."
    )
251
252
def start_cluster(argv: Argv) -> None:
    """
    Start cluster services, either on the given nodes or locally.

    Commandline options:
    * --wait
    * --request-timeout - timeout for HTTP requests, have effect only if at
      least one node have been specified
    """
    wait = "--wait" in utils.pcs_options
    wait_timeout = utils.validate_wait_get_timeout(False) if wait else None

    if argv:
        unique_nodes = set(argv)  # deduplicate the node list
        start_cluster_nodes(unique_nodes)
        if wait:
            wait_for_nodes_started(unique_nodes, wait_timeout)
        return

    # local start
    if not utils.hasCorosyncConf():
        utils.err("cluster is not currently configured on this node")

    print_to_stderr("Starting Cluster...")
    # corosync (and qdevice, if handled) must be up before pacemaker
    service_list = ["corosync"]
    if utils.need_to_handle_qdevice_service():
        service_list.append("corosync-qdevice")
    service_list.append("pacemaker")
    for service in service_list:
        utils.start_service(service)
    if wait:
        wait_for_nodes_started([], wait_timeout)
285
286
def start_cluster_all() -> None:
    """
    Start cluster services on all cluster nodes.

    Commandline options:
    * --wait
    * --request-timeout - timeout for HTTP requests
    """
    wait = "--wait" in utils.pcs_options
    wait_timeout = utils.validate_wait_get_timeout(False) if wait else None

    node_name_list, report_list = get_existing_nodes_names(
        utils.get_corosync_conf_facade()
    )
    if not node_name_list:
        report_list.append(
            reports.ReportItem.error(
                reports.messages.CorosyncConfigNoNodesDefined()
            )
        )
    if report_list:
        process_library_reports(report_list)

    start_cluster_nodes(node_name_list)
    if wait:
        wait_for_nodes_started(node_name_list, wait_timeout)
314
315
def start_cluster_nodes(nodes: StringCollection) -> None:
    """
    Start cluster services on the given nodes in parallel.

    Commandline options:
    * --request-timeout - timeout for HTTP requests
    """
    # Large clusters take longer to start up, so scale the timeout: one
    # default timeout per each (started) group of 8 nodes:
    #   1-8 nodes: 1 * timeout, 9-16 nodes: 2 * timeout, 17-24: 3 * ...
    # Users can override this and set their own timeout by specifying
    # the --request-timeout option (see utils.sendHTTPRequest).
    scaled_timeout = int(
        settings.default_request_timeout * math.ceil(len(nodes) / 8.0)
    )
    utils.read_known_hosts_file()  # cache known hosts
    errors = parallel_for_nodes(
        utils.startCluster, nodes, quiet=True, timeout=scaled_timeout
    )
    if errors:
        utils.err("unable to start all nodes\n" + "\n".join(errors.values()))
340
341
def is_node_fully_started(node_status) -> bool:
    """
    Given a node status dict, report whether the node is online and has no
    pending actions. Both the "online" and "pending" keys must be present.

    Commandline options: no options
    """
    has_flags = "online" in node_status and "pending" in node_status
    return has_flags and node_status["online"] and not node_status["pending"]
352
353
def wait_for_local_node_started(
    stop_at: datetime.datetime, interval: float
) -> tuple[int, str]:
    """
    Poll local pacemaker until the node is fully started or a deadline passes.

    stop_at -- deadline; give up once the current time exceeds it
    interval -- seconds to sleep between status polls

    Returns (0, "Started") on success, (1, <error message>) otherwise.

    Commandline options: no options
    """
    try:
        while True:
            time.sleep(interval)
            node_status = lib_pacemaker.get_local_node_status(
                utils.cmd_runner()
            )
            if is_node_fully_started(node_status):
                return 0, "Started"
            if datetime.datetime.now() > stop_at:
                return 1, "Waiting timeout"
    except LibraryError as e:
        # e.args carries report item DTOs; join their messages into one string
        return (
            1,
            "Unable to get node status: {0}".format(
                "\n".join(
                    report_item_msg_from_dto(
                        cast(reports.ReportItemDto, item).message
                    ).message
                    for item in e.args
                )
            ),
        )
382
383
def wait_for_remote_node_started(
    node: str, stop_at: datetime.datetime, interval: float
) -> tuple[int, str]:
    """
    Poll a remote node until pacemaker reports it fully started.

    Returns (0, "Started") on success, (1, <error message>) otherwise.

    Commandline options:
    * --request-timeout - timeout for HTTP requests
    """
    while True:
        time.sleep(interval)
        code, output = utils.getPacemakerNodeStatus(node)
        # Fatal conditions (HTTP error, permission denied, unable to auth)
        # will not get magically fixed, so stop retrying immediately.
        if code in (1, 3, 4):
            return 1, output
        if code == 0:
            try:
                if is_node_fully_started(json.loads(output)):
                    return 0, "Started"
            except (ValueError, KeyError):
                # malformed status will not get fixed by retrying either
                return 1, "Unable to get node status"
        if datetime.datetime.now() > stop_at:
            return 1, "Waiting timeout"
408
409
def wait_for_nodes_started(
    node_list: StringIterable, timeout: Optional[int] = None
) -> None:
    """
    Wait until the given nodes (or the local node if the list is empty) have
    fully started, or fail after a timeout (default 15 minutes).

    Commandline options:
    * --request-timeout - timeout for HTTP request, effective only if
      node_list is not empty list
    """
    effective_timeout = timeout if timeout is not None else 60 * 15
    interval = 2
    stop_at = datetime.datetime.now() + datetime.timedelta(
        seconds=effective_timeout
    )
    print_to_stderr("Waiting for node(s) to start...")
    if node_list:
        utils.read_known_hosts_file()  # cache known hosts
        node_errors = parallel_for_nodes(
            wait_for_remote_node_started, node_list, stop_at, interval
        )
        if node_errors:
            utils.err("unable to verify all nodes have started")
    else:
        code, output = wait_for_local_node_started(stop_at, interval)
        if code == 0:
            print_to_stderr(output)
        else:
            utils.err(output)
435
436
def stop_cluster_all() -> None:
    """
    Stop cluster services on all cluster nodes.

    Commandline options:
    * --force - no error when possible quorum loss
    * --request-timeout - timeout for HTTP requests
    """
    node_name_list, report_list = get_existing_nodes_names(
        utils.get_corosync_conf_facade()
    )
    if not node_name_list:
        report_list.append(
            reports.ReportItem.error(
                reports.messages.CorosyncConfigNoNodesDefined()
            )
        )
    if report_list:
        process_library_reports(report_list)
    stop_cluster_nodes(node_name_list)
456
457
def stop_cluster_nodes(nodes: StringCollection) -> None:  # noqa: PLR0912
    """
    Stop pacemaker and then corosync on the given cluster nodes.

    Unless --force is used (or all nodes are being stopped), first verify
    that stopping the nodes will not make the rest of the cluster lose
    quorum.

    Commandline options:
    * --force - no error when possible quorum loss
    * --request-timeout - timeout for HTTP requests
    """
    # pylint: disable=too-many-branches
    all_nodes, report_list = get_existing_nodes_names(
        utils.get_corosync_conf_facade()
    )
    unknown_nodes = set(nodes) - set(all_nodes)
    if unknown_nodes:
        if report_list:
            process_library_reports(report_list)
        utils.err(
            "nodes '%s' do not appear to exist in configuration"
            % "', '".join(sorted(unknown_nodes))
        )

    utils.read_known_hosts_file()  # cache known hosts
    # When every node is being stopped, losing quorum is expected and the
    # quorum check below is skipped.
    stopping_all = set(nodes) >= set(all_nodes)
    if "--force" not in utils.pcs_options and not stopping_all:
        error_list = []
        for node in nodes:
            retval, data = utils.get_remote_quorumtool_output(node)
            if retval != 0:
                error_list.append(node + ": " + data)
                continue
            try:
                quorum_status_facade = QuorumStatusFacade.from_string(data)
                if not quorum_status_facade.is_quorate:
                    # Get quorum status from a quorate node, non-quorate nodes
                    # may provide inaccurate info. If no node is quorate, there
                    # is no quorum to be lost and therefore no error to be
                    # reported.
                    continue
                if quorum_status_facade.stopping_nodes_cause_quorum_loss(nodes):
                    utils.err(
                        "Stopping the node(s) will cause a loss of the quorum"
                        + ", use --force to override"
                    )
                else:
                    # We have the info, no need to print errors
                    error_list = []
                    break
            except QuorumStatusException:
                if not utils.is_node_offline_by_quorumtool_output(data):
                    error_list.append(node + ": Unable to get quorum status")
                # else the node seems to be stopped already
        if error_list:
            utils.err(
                "Unable to determine whether stopping the nodes will cause "
                + "a loss of the quorum, use --force to override\n"
                + "\n".join(error_list)
            )

    was_error = False
    # Stop pacemaker first on all nodes in parallel, while corosync is still
    # running everywhere.
    node_errors = parallel_for_nodes(
        utils.repeat_if_timeout(utils.stopPacemaker), nodes, quiet=True
    )
    accessible_nodes = [node for node in nodes if node not in node_errors]
    if node_errors:
        # do not exit yet if some nodes are still reachable; corosync can be
        # stopped on those below
        utils.err(
            "unable to stop all nodes\n" + "\n".join(node_errors.values()),
            exit_after_error=not accessible_nodes,
        )
        was_error = True

    for node in node_errors:
        print_to_stderr(
            "{0}: Not stopping cluster - node is unreachable".format(node)
        )

    # Only stop corosync on nodes where pacemaker was stopped successfully.
    node_errors = parallel_for_nodes(
        utils.stopCorosync, accessible_nodes, quiet=True
    )
    if node_errors:
        utils.err(
            "unable to stop all nodes\n" + "\n".join(node_errors.values())
        )
    if was_error:
        utils.err("unable to stop all nodes")
540
541
def enable_cluster(argv: Argv) -> None:
    """
    Enable cluster services, either on the given nodes or locally.

    Commandline options:
    * --request-timeout - timeout for HTTP requests, effective only if at
      least one node has been specified
    """
    if not argv:
        # local node only
        try:
            utils.enableServices()
        except LibraryError as e:
            process_library_reports(list(e.args))
        return
    enable_cluster_nodes(argv)
556
557
def disable_cluster(argv: Argv) -> None:
    """
    Disable cluster services, either on the given nodes or locally.

    Commandline options:
    * --request-timeout - timeout for HTTP requests, effective only if at
      least one node has been specified
    """
    if not argv:
        # local node only
        try:
            utils.disableServices()
        except LibraryError as e:
            process_library_reports(list(e.args))
        return
    disable_cluster_nodes(argv)
572
573
def enable_cluster_all() -> None:
    """
    Enable cluster services on all cluster nodes.

    Commandline options:
    * --request-timeout - timeout for HTTP requests
    """
    node_name_list, report_list = get_existing_nodes_names(
        utils.get_corosync_conf_facade()
    )
    if not node_name_list:
        report_list.append(
            reports.ReportItem.error(
                reports.messages.CorosyncConfigNoNodesDefined()
            )
        )
    if report_list:
        process_library_reports(report_list)
    enable_cluster_nodes(node_name_list)
592
593
def disable_cluster_all() -> None:
    """
    Disable cluster services on all cluster nodes.

    Commandline options:
    * --request-timeout - timeout for HTTP requests
    """
    node_name_list, report_list = get_existing_nodes_names(
        utils.get_corosync_conf_facade()
    )
    if not node_name_list:
        report_list.append(
            reports.ReportItem.error(
                reports.messages.CorosyncConfigNoNodesDefined()
            )
        )
    if report_list:
        process_library_reports(report_list)
    disable_cluster_nodes(node_name_list)
612
613
def enable_cluster_nodes(nodes: StringIterable) -> None:
    """
    Enable cluster services on the given nodes.

    Commandline options:
    * --request-timeout - timeout for HTTP requests
    """
    failures = utils.map_for_error_list(utils.enableCluster, nodes)
    if failures:
        utils.err("unable to enable all nodes\n" + "\n".join(failures))
622
623
def disable_cluster_nodes(nodes: StringIterable) -> None:
    """
    Disable cluster services on the given nodes.

    Commandline options:
    * --request-timeout - timeout for HTTP requests
    """
    failures = utils.map_for_error_list(utils.disableCluster, nodes)
    if failures:
        utils.err("unable to disable all nodes\n" + "\n".join(failures))
632
633
def destroy_cluster(argv: Argv) -> None:
    """
    Stop and destroy the cluster on the given nodes.

    Commandline options:
    * --request-timeout - timeout for HTTP requests
    """
    if not argv:
        return
    utils.read_known_hosts_file()  # cache known hosts
    nodes = argv
    # Stop pacemaker and resources while the cluster is still quorate. Any
    # errors here are ignored on purpose: destroy stops whatever cluster
    # daemons are still running anyway.
    parallel_for_nodes(
        utils.repeat_if_timeout(utils.stopPacemaker), nodes, quiet=True
    )
    node_errors = parallel_for_nodes(utils.destroyCluster, nodes, quiet=True)
    if node_errors:
        utils.err(
            "unable to destroy cluster\n" + "\n".join(node_errors.values())
        )
655
656
def stop_cluster(argv: Argv) -> None:
    """
    Stop cluster daemons, either on the given nodes or locally.

    Unless --force is used, first check that stopping the local node will not
    make the cluster lose quorum.

    Commandline options:
    * --force - no error when possible quorum loss
    * --request-timeout - timeout for HTTP requests - effective only when at
      least one node has been specified
    * --pacemaker - stop pacemaker, only effective when no node has been
      specified
    """
    if argv:
        stop_cluster_nodes(argv)
        return

    if "--force" not in utils.pcs_options:
        # corosync 3.0.1 and older:
        # - retval is 0 on success if a node is not in a partition with quorum
        # - retval is 1 on error OR on success if a node has quorum
        # corosync 3.0.2 and newer:
        # - retval is 0 on success if a node has quorum
        # - retval is 1 on error
        # - retval is 2 on success if a node is not in a partition with quorum
        # Due to the version differences above, the retval is ignored and the
        # output is parsed instead.
        output, dummy_retval = utils.run(["corosync-quorumtool", "-p", "-s"])
        try:
            if QuorumStatusFacade.from_string(
                output
            ).stopping_local_node_cause_quorum_loss():
                utils.err(
                    "Stopping the node will cause a loss of the quorum"
                    + ", use --force to override"
                )
        except QuorumStatusException:
            if not utils.is_node_offline_by_quorumtool_output(output):
                utils.err(
                    "Unable to determine whether stopping the node will cause "
                    + "a loss of the quorum, use --force to override"
                )
            # else the node seems to be stopped already, proceed to be sure

    # with neither --pacemaker nor --corosync specified, stop both
    stop_all = (
        "--pacemaker" not in utils.pcs_options
        and "--corosync" not in utils.pcs_options
    )
    if stop_all or "--pacemaker" in utils.pcs_options:
        stop_cluster_pacemaker()
    if stop_all or "--corosync" in utils.pcs_options:
        stop_cluster_corosync()
703
704
def stop_cluster_pacemaker() -> None:
    """
    Stop the pacemaker service on the local node.

    Commandline options: no options
    """
    print_to_stderr("Stopping Cluster (pacemaker)...")
    utils.stop_service("pacemaker")
711
712
def stop_cluster_corosync() -> None:
    """
    Stop corosync (and corosync-qdevice when it is handled) on the local node.

    Commandline options: no options
    """
    print_to_stderr("Stopping Cluster (corosync)...")
    # qdevice, when present, is stopped before corosync itself
    service_list = ["corosync"]
    if utils.need_to_handle_qdevice_service():
        service_list.insert(0, "corosync-qdevice")
    for service in service_list:
        utils.stop_service(service)
724
725
def kill_cluster(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Force-kill all cluster daemons on the local node.

    Options: no options
    """
    del lib
    if argv:
        raise CmdLineInputError()
    modifiers.ensure_only_supported()
    # the result is deliberately ignored; killall may legitimately find
    # nothing left to kill
    dummy_output, dummy_retval = kill_local_cluster_services()
735
736
737 # if dummy_retval != 0:
738 # print "Error: unable to execute killall -9"
739 # print output
740 # sys.exit(1)
741
742
def kill_local_cluster_services() -> tuple[str, int]:
    """
    Send SIGKILL to every known cluster daemon on the local node.

    Returns the (output, retval) pair of the killall run.

    Commandline options: no options
    """
    all_cluster_daemons = [
        # Daemons taken from cluster-clean script in pacemaker
        "pacemaker-attrd",
        "pacemaker-based",
        "pacemaker-controld",
        "pacemaker-execd",
        "pacemaker-fenced",
        "pacemaker-remoted",
        "pacemaker-schedulerd",
        "pacemakerd",
        "dlm_controld",
        "gfs_controld",
        # Corosync daemons
        "corosync-qdevice",
        "corosync",
    ]
    return utils.run([settings.killall_exec, "-9", *all_cluster_daemons])
764
765
def cluster_push(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:  # noqa: PLR0912, PLR0915
    """
    Push a full or partial CIB from a file to the cluster.

    Accepts a filename and optionally either "scope=<scope>" or
    "diff-against=<original cib file>". With diff-against only the difference
    between the two files is pushed as a patch; otherwise the CIB (or the
    selected scope) is replaced outright.

    Options:
    * --wait
    * --config - push only configuration section of CIB
    * -f - CIB file
    """
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-statements

    def get_details_from_crm_verify():
        # get a new runner to run crm_verify command and pass the CIB filename
        # into it so that the verify is run on the file instead on the live
        # cluster CIB
        verify_runner = utils.cmd_runner(cib_file_override=filename)
        # Request verbose output, otherwise we may only get an unhelpful
        # message:
        # Configuration invalid (with errors) (-V may provide more detail)
        # The returncode is always expected to be non-zero to indicate an
        # invalid CIB - we run the verify because the CIB is invalid. It and
        # the can-be-more-verbose flag are therefore deliberately unused.
        (
            verify_stdout,
            verify_stderr,
            dummy_verify_returncode,
            dummy_verify_can_be_more_verbose,
        ) = lib_pacemaker.verify(verify_runner, verbose=True)
        return join_multilines([verify_stdout, verify_stderr])

    del lib
    modifiers.ensure_only_supported("--wait", "--config", "-f")
    if len(argv) > 2:
        raise CmdLineInputError()

    filename = None
    scope = None
    timeout = None
    diff_against = None

    if modifiers.get("--wait"):
        timeout = utils.validate_wait_get_timeout()
    for arg in argv:
        if "=" not in arg:
            filename = arg
        else:
            arg_name, arg_value = arg.split("=", 1)
            if arg_name == "scope":
                if modifiers.get("--config"):
                    utils.err("Cannot use both scope and --config")
                if not utils.is_valid_cib_scope(arg_value):
                    utils.err("invalid CIB scope '%s'" % arg_value)
                else:
                    scope = arg_value
            elif arg_name == "diff-against":
                diff_against = arg_value
            else:
                raise CmdLineInputError()
    if modifiers.get("--config"):
        scope = "configuration"
    if diff_against and scope:
        utils.err("Cannot use both scope and diff-against")
    if not filename:
        raise CmdLineInputError()

    try:
        # SECURITY NOTE: xml.dom.minidom does not protect against maliciously
        # constructed XML (e.g. entity expansion attacks). The file is
        # supplied by the local administrator; if untrusted input ever
        # becomes possible here, switch to the defusedxml package.
        new_cib_dom = xml.dom.minidom.parse(filename)
        if scope and not new_cib_dom.getElementsByTagName(scope):
            utils.err(
                "unable to push cib, scope '%s' not present in new cib" % scope
            )
    except (EnvironmentError, xml.parsers.expat.ExpatError) as e:
        utils.err("unable to parse new cib: %s" % e)

    # exit code with which cibadmin signals an invalid CIB
    EXITCODE_INVALID_CIB = 78
    runner = utils.cmd_runner()

    if diff_against:
        command = [
            settings.crm_diff_exec,
            "--original",
            diff_against,
            "--new",
            filename,
            "--no-version",
        ]
        patch, stderr, retval = runner.run(command)
        # 0 (CRM_EX_OK) - success with no difference
        # 1 (CRM_EX_ERROR) - success with difference
        # 64 (CRM_EX_USAGE) - usage error
        # 65 (CRM_EX_DATAERR) - XML fragments not parseable
        if retval > 1:
            utils.err("unable to diff the CIBs:\n" + stderr)
        if retval == 0:
            print_to_stderr(
                "The new CIB is the same as the original CIB, nothing to push."
            )
            sys.exit(0)

        command = [
            settings.cibadmin_exec,
            "--patch",
            "--xml-pipe",
        ]
        output, stderr, retval = runner.run(command, patch)
        if retval != 0:
            push_output = stderr + output
            # on an invalid CIB, ask crm_verify for a more detailed message
            verify_output = (
                get_details_from_crm_verify()
                if retval == EXITCODE_INVALID_CIB
                else ""
            )
            error_text = (
                f"{push_output}\n\n{verify_output}"
                if verify_output.strip()
                else push_output
            )
            utils.err("unable to push cib\n" + error_text)

    else:
        command = ["cibadmin", "--replace", "--xml-file", filename]
        if scope:
            command.append("--scope=%s" % scope)
        output, retval = utils.run(command)
        # 103 (CRM_EX_OLD) - update older than existing config
        if retval == 103:
            utils.err(
                "Unable to push to the CIB because pushed configuration "
                "is older than existing one. If you are sure you want to "
                "push this configuration, try to use --config to replace only "
                "configuration part instead of whole CIB. Otherwise get current"
                " configuration by running command 'pcs cluster cib' and update"
                " that."
            )
        elif retval != 0:
            # on an invalid CIB, ask crm_verify for a more detailed message
            verify_output = (
                get_details_from_crm_verify()
                if retval == EXITCODE_INVALID_CIB
                else ""
            )
            error_text = (
                f"{output}\n\n{verify_output}"
                if verify_output.strip()
                else output
            )
            utils.err("unable to push cib\n" + error_text)

    print_to_stderr("CIB updated")
    # report (but do not fail on) any verification issues in the pushed CIB
    try:
        cib_errors = lib_pacemaker.get_cib_verification_errors(runner)
        if cib_errors:
            print_to_stderr("\n".join(cib_errors))
    except lib_pacemaker.BadApiResultFormat as e:
        print_to_stderr(
            f"Unable to verify CIB: {e.original_exception}\n"
            f"crm_verify output:\n{e.pacemaker_response}"
        )

    if not modifiers.is_specified("--wait"):
        return
    # wait for the cluster to settle after the push
    cmd = ["crm_resource", "--wait"]
    if timeout:
        cmd.extend(["--timeout", str(timeout)])
    output, retval = utils.run(cmd)
    if retval != 0:
        msg = []
        if retval == settings.pacemaker_wait_timeout_status:
            msg.append("waiting timeout")
        if output:
            msg.append("\n" + output)
        utils.err("\n".join(msg).strip())
937
def cluster_edit(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:  # noqa: PLR0912
    """
    Open the CIB (or a part of it) in $EDITOR and push back any changes made.

    Options:
    * --config - edit configuration section of CIB
    * -f - CIB file
    * --wait
    """
    # pylint: disable=too-many-branches
    modifiers.ensure_only_supported("--config", "--wait", "-f")
    if "EDITOR" in os.environ:
        # at most one "scope=..." argument is accepted
        if len(argv) > 1:
            raise CmdLineInputError()

        scope = None
        scope_arg = ""
        for arg in argv:
            if "=" not in arg:
                raise CmdLineInputError()
            arg_name, arg_value = arg.split("=", 1)
            if arg_name == "scope" and not modifiers.get("--config"):
                if not utils.is_valid_cib_scope(arg_value):
                    utils.err("invalid CIB scope '%s'" % arg_value)
                else:
                    # keep the raw "scope=..." argument so it can be passed
                    # through to cluster_push below
                    scope_arg = arg
                    scope = arg_value
            else:
                raise CmdLineInputError()
        if modifiers.get("--config"):
            scope = "configuration"
            # Leave scope_arg empty as cluster_push will pick up a --config
            # option from utils.pcs_options
            scope_arg = ""

        editor = os.environ["EDITOR"]
        cib = utils.get_cib(scope)
        # write the CIB to a temp file, let the user edit it, read it back
        with tempfile.NamedTemporaryFile(mode="w+", suffix=".pcs") as tempcib:
            tempcib.write(cib)
            tempcib.flush()
            try:
                subprocess.call([editor, tempcib.name])
            except OSError:
                utils.err("unable to open file with $EDITOR: " + editor)

            tempcib.seek(0)
            newcib = "".join(tempcib.readlines())
            if newcib == cib:
                print_to_stderr("CIB not updated, no changes detected")
            else:
                cluster_push(
                    lib,
                    [arg for arg in [tempcib.name, scope_arg] if arg],
                    modifiers.get_subset("--wait", "--config", "-f"),
                )

    else:
        utils.err("$EDITOR environment variable is not set")
994
995
def get_cib(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:  # noqa: PLR0912
    """
    Print the CIB (or a part of it) to stdout or save it to a file.

    Options:
    * --config show configuration section of CIB
    * -f - CIB file
    """
    # pylint: disable=too-many-branches
    del lib
    modifiers.ensure_only_supported("--config", "-f")
    if len(argv) > 2:
        raise CmdLineInputError()

    filename = None
    scope = None
    for arg in argv:
        if "=" not in arg:
            filename = arg
            continue
        arg_name, arg_value = arg.split("=", 1)
        # only "scope=..." is recognized, and not combined with --config
        if arg_name != "scope" or modifiers.get("--config"):
            raise CmdLineInputError()
        if not utils.is_valid_cib_scope(arg_value):
            utils.err("invalid CIB scope '%s'" % arg_value)
        else:
            scope = arg_value
    if modifiers.get("--config"):
        scope = "configuration"

    if not filename:
        print(utils.get_cib(scope).rstrip())
        return
    output = utils.get_cib(scope)
    if not output:
        utils.err("No data in the CIB")
    try:
        with open(filename, "w") as cib_file:
            cib_file.write(output)
    except EnvironmentError as e:
        utils.err("Unable to write to file '%s', %s" % (filename, e.strerror))
1038
1039
class RemoteAddNodes(RunRemotelyBase):
    """
    Communication command: ask a node of an existing cluster to add new nodes
    via the "remote/cluster_add_nodes" endpoint.
    """

    def __init__(self, report_processor, target, data):
        # target -- the node the request is sent to
        # data -- payload, serialized to JSON for the remote call
        super().__init__(report_processor)
        self._target = target
        self._data = data
        # flipped to True only when the remote node reports "success"
        self._success = False

    def get_initial_request_list(self):
        """Build the single HTTP request carrying the add-nodes payload."""
        return [
            Request(
                self._target,
                RequestData(
                    "remote/cluster_add_nodes",
                    [("data_json", json.dumps(self._data))],
                ),
            )
        ]

    def _process_response(self, response):
        """Translate the remote node's JSON answer into local report items."""
        node_label = response.request.target.label
        report_item = self._get_response_report(response)
        if report_item is not None:
            # communication-level failure - there is no payload to process
            self._report(report_item)
            return

        try:
            output = json.loads(response.data)
            # re-emit the reports produced on the remote node
            for report_dict in output["report_list"]:
                self._report(
                    reports.ReportItem(
                        severity=reports.ReportItemSeverity(
                            report_dict["severity"],
                            report_dict["forceable"],
                        ),
                        message=reports.messages.LegacyCommonMessage(
                            report_dict["code"],
                            report_dict["info"],
                            report_dict["report_text"],
                        ),
                    )
                )
            if output["status"] == "success":
                self._success = True
            elif output["status"] != "error":
                # NOTE(review): an "error" status is presumably fully covered
                # by the report_list re-emitted above; only unexpected
                # statuses get status_msg printed here - confirm against the
                # remote API
                print_to_stderr("Error: {}".format(output["status_msg"]))

        except (KeyError, json.JSONDecodeError):
            # missing keys or unparsable body -> malformed remote response
            self._report(
                reports.ReportItem.warning(
                    reports.messages.InvalidResponseFormat(node_label)
                )
            )

    def on_complete(self):
        """Return True if the remote command reported success."""
        return self._success
1095
1096
def node_add_outside_cluster(
    lib: Any, argv: Argv, modifiers: InputModifiers
) -> None:
    """
    Add nodes to a cluster this host is NOT a member of, delegating the work
    to one node of that cluster.

    Options:
        * --wait - wait until new node will start up, effective only when
          --start is specified
        * --start - start new node
        * --enable - enable new node
        * --force - treat validation issues and not resolvable addresses as
          warnings instead of errors
        * --skip-offline - skip unreachable nodes
        * --no-watchdog-validation - do not validate watchdogs
        * --request-timeout - HTTP request timeout
    """
    del lib
    modifiers.ensure_only_supported(
        "--wait",
        "--start",
        "--enable",
        "--force",
        "--skip-offline",
        "--no-watchdog-validation",
        "--request-timeout",
    )
    if len(argv) < 2:
        raise CmdLineInputError(
            "Usage: pcs cluster node add-outside <cluster node> <node name> "
            "[addr=<node address>]... [watchdog=<watchdog path>] "
            "[device=<SBD device path>]... [--start [--wait[=<n>]]] [--enable] "
            "[--no-watchdog-validation]"
        )

    cluster_node = argv[0]
    new_node = _parse_add_node(argv[1:])

    force_flags = [
        getattr(reports.codes, flag_name)
        for option, flag_name in (
            ("--force", "FORCE"),
            ("--skip-offline", "SKIP_OFFLINE_NODES"),
        )
        if modifiers.get(option)
    ]
    command_data = {
        "nodes": [new_node],
        "wait": modifiers.get("--wait"),
        "start": modifiers.get("--start"),
        "enable": modifiers.get("--enable"),
        "no_watchdog_validation": modifiers.get("--no-watchdog-validation"),
        "force_flags": force_flags,
    }

    env = utils.get_lib_env()
    reporter = env.report_processor
    report_list, target_list = (
        env.get_node_target_factory().get_target_list_with_reports(
            [cluster_node],
            skip_non_existing=False,
            allow_skip=False,
        )
    )
    reporter.report_list(report_list)
    if reporter.has_errors:
        raise LibraryError()

    # The whole operation is carried out by the target cluster node.
    com_cmd = RemoteAddNodes(reporter, target_list[0], command_data)
    if not run_com_cmd(env.get_node_communicator(), com_cmd):
        raise LibraryError()
1164
1165
def node_remove(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Remove the given nodes from the cluster.

    Options:
        * --force - continue even though the action may cause quorum loss
        * --skip-offline - skip unreachable nodes
        * --request-timeout - HTTP request timeout
    """
    modifiers.ensure_only_supported(
        "--force",
        "--skip-offline",
        "--request-timeout",
    )
    if not argv:
        raise CmdLineInputError()

    force_flags = [
        getattr(reports.codes, flag_name)
        for option, flag_name in (
            ("--force", "FORCE"),
            ("--skip-offline", "SKIP_OFFLINE_NODES"),
        )
        if modifiers.get(option)
    ]
    lib.cluster.remove_nodes(argv, force_flags=force_flags)
1188
1189
def cluster_uidgid(  # noqa: PLR0912
    lib: Any, argv: Argv, modifiers: InputModifiers, silent_list: bool = False
) -> None:
    """
    List, add or remove corosync uidgid files.

    With no arguments, list configured uid/gid pairs. Otherwise the first
    argument is a subcommand ("add", "delete"/"remove", or the removed "rm")
    followed by uid=<uid> and/or gid=<gid> arguments.

    lib -- unused, kept for the common command signature
    argv -- command line arguments
    modifiers -- command line option values
    silent_list -- when listing, print nothing if no uidgids are configured

    Options: no options
    """
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-locals
    del lib
    modifiers.ensure_only_supported()
    if not argv:
        # No subcommand: list uid/gid pairs found in the uidgid directory.
        uid_gid_files = os.listdir(settings.corosync_uidgid_dir)
        uid_gid_lines: list[str] = []
        for ug_file in uid_gid_files:
            uid_gid_dict = utils.read_uid_gid_file(ug_file)
            if "uid" in uid_gid_dict or "gid" in uid_gid_dict:
                # Build one "UID/GID: uid=<uid> gid=<gid>" line; missing
                # values are simply left empty.
                line = "UID/GID: uid="
                if "uid" in uid_gid_dict:
                    line += uid_gid_dict["uid"]
                line += " gid="
                if "gid" in uid_gid_dict:
                    line += uid_gid_dict["gid"]

                uid_gid_lines.append(line)
        if uid_gid_lines:
            print("\n".join(sorted(uid_gid_lines)))
        elif not silent_list:
            print_to_stderr("No uidgids configured")
        return

    command = argv.pop(0)
    uid = ""
    gid = ""

    if command in {"add", "delete", "remove"} and argv:
        # Parse uid=<value> / gid=<value> arguments; utils.err presumably
        # aborts the command, so execution continues only on valid input.
        for arg in argv:
            if arg.find("=") == -1:
                utils.err(
                    "uidgid options must be of the form uid=<uid> gid=<gid>"
                )

            (key, value) = arg.split("=", 1)
            if key not in {"uid", "gid"}:
                utils.err(
                    "%s is not a valid key, you must use uid or gid" % key
                )

            if key == "uid":
                uid = value
            if key == "gid":
                gid = value
        # At least one of uid/gid must have a non-empty value.
        if uid == "" and gid == "":
            utils.err("you must set either uid or gid")

        if command == "add":
            utils.write_uid_gid_file(uid, gid)
        elif command in {"delete", "remove"}:
            file_removed = utils.remove_uid_gid_file(uid, gid)
            if not file_removed:
                utils.err(
                    "no uidgid files with uid=%s and gid=%s found" % (uid, gid)
                )
    elif command == "rm":
        # To be removed in the next significant version
        raise_command_replaced(
            [
                "pcs cluster uidgid delete",
                "pcs cluster uidgid remove",
            ],
            pcs_version="0.11",
        )
    else:
        raise CmdLineInputError()
1263
1264
def cluster_get_corosync_conf(
    lib: Any, argv: Argv, modifiers: InputModifiers
) -> None:
    """
    Print corosync.conf, either the local one or one fetched from a node.

    Options:
        * --request-timeout - timeout for HTTP requests, effective only when
          a node has been specified
    """
    del lib
    modifiers.ensure_only_supported("--request-timeout")
    if len(argv) > 1:
        raise CmdLineInputError()

    if argv:
        # Fetch the configuration from the specified node.
        remote_node = argv[0]
        retval, output = utils.getCorosyncConfig(remote_node)
        if retval != 0:
            utils.err(output)
        else:
            print(output.rstrip())
    else:
        # No node given: print the local corosync.conf.
        print(utils.getCorosyncConf().rstrip())
1288
1289
def cluster_reload(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Reload the corosync configuration on the local node.

    Options: no options
    """
    del lib
    modifiers.ensure_only_supported()
    # The only supported form is "pcs cluster reload corosync".
    if argv != ["corosync"]:
        raise CmdLineInputError()

    output, retval = utils.reloadCorosync()
    if retval != 0 or "invalid option" in output:
        utils.err(output.rstrip())
    print_to_stderr("Corosync reloaded")
1303
1304
# Completely tear down the cluster & remove config files
# Code taken from cluster-clean script in pacemaker
def cluster_destroy(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:  # noqa: PLR0912
    """
    Kill cluster services and permanently remove cluster state and config.

    Without --all only the local node is torn down; with --all the whole
    cluster, including remote and guest nodes found in the CIB, is
    deconfigured.

    Options:
        * --all - destroy cluster on all cluster nodes => destroy whole cluster
        * --request-timeout - timeout of HTTP requests, effective only with --all
        * --force - proceed without asking for confirmation
    """
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-statements
    del lib
    modifiers.ensure_only_supported("--all", "--request-timeout", "--force")
    if argv:
        raise CmdLineInputError()
    if utils.is_run_interactive():
        warn(
            "It is recommended to run 'pcs cluster stop' before "
            "destroying the cluster."
        )
    if not utils.get_continue_confirmation_or_force(
        "This would kill all cluster processes and then PERMANENTLY remove "
        "cluster state and configuration",
        bool(modifiers.get("--force")),
    ):
        # The user did not confirm the destructive action.
        return
    if modifiers.get("--all"):
        # load data
        cib = None
        lib_env = utils.get_lib_env()
        try:
            cib = lib_env.get_cib()
        except LibraryError:
            # Destroying continues without the CIB; only remote / guest
            # nodes cannot be deconfigured in that case.
            warn(
                "Unable to load CIB to get guest and remote nodes from it, "
                "those nodes will not be deconfigured."
            )
        corosync_nodes, report_list = get_existing_nodes_names(
            utils.get_corosync_conf_facade()
        )
        if not corosync_nodes:
            report_list.append(
                reports.ReportItem.error(
                    reports.messages.CorosyncConfigNoNodesDefined()
                )
            )
        if report_list:
            process_library_reports(report_list)

        # destroy remote and guest nodes
        if cib is not None:
            try:
                all_remote_nodes, report_list = get_existing_nodes_names(
                    cib=cib
                )
                if report_list:
                    process_library_reports(report_list)
                if all_remote_nodes:
                    # Best effort: offline nodes and failures are tolerated.
                    _destroy_pcmk_remote_env(
                        lib_env,
                        all_remote_nodes,
                        skip_offline_nodes=True,
                        allow_fails=True,
                    )
            except LibraryError as e:
                process_library_reports(list(e.args))

        # destroy full-stack nodes
        destroy_cluster(corosync_nodes)
    else:
        print_to_stderr("Shutting down pacemaker/corosync services...")
        for service in ["pacemaker", "corosync-qdevice", "corosync"]:
            # It is safe to ignore error since we want it not to be running
            # anyways.
            with contextlib.suppress(LibraryError):
                utils.stop_service(service)
        print_to_stderr("Killing any remaining services...")
        kill_local_cluster_services()
        # previously errors were suppressed in here, let's keep it that way
        # for now
        with contextlib.suppress(Exception):
            utils.disableServices()

        # it's not a big deal if sbd disable fails
        with contextlib.suppress(Exception):
            service_manager = utils.get_service_manager()
            service_manager.disable(
                lib_sbd.get_sbd_service_name(service_manager)
            )

    # The local file cleanup below runs in both branches.
    print_to_stderr("Removing all cluster configuration files...")
    dummy_output, dummy_retval = utils.run(
        [
            settings.rm_exec,
            "-f",
            settings.corosync_conf_file,
            settings.corosync_authkey_file,
            settings.pacemaker_authkey_file,
            settings.pcsd_dr_config_location,
        ]
    )
    # Pacemaker state files to delete from the local state directory.
    state_files = [
        "cib-*",
        "cib.*",
        "cib.xml*",
        "core.*",
        "cts.*",
        "hostcache",
        "pe*.bz2",
    ]
    for name in state_files:
        dummy_output, dummy_retval = utils.run(
            [
                settings.find_exec,
                settings.pacemaker_local_state_dir,
                "-name",
                name,
                "-exec",
                settings.rm_exec,
                "-f",
                "{}",
                ";",
            ]
        )
    # errors from deleting other files are suppressed as well we do not
    # want to fail if qdevice was not set up
    with contextlib.suppress(Exception):
        qdevice_net.client_destroy()
1432
1433
def cluster_verify(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Verify the cluster configuration and report problems found.

    Options:
        * -f - CIB file
        * --full - more verbose output
    """
    modifiers.ensure_only_supported("-f", "--full")
    if argv:
        raise CmdLineInputError()
    verbose = modifiers.get("--full")
    lib.cluster.verify(verbose=verbose)
1445
1446
def cluster_report(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:  # noqa: PLR0912
    """
    Create a tarball with cluster data by running crm_report.

    Options:
        * --force - overwrite existing file
        * --from - timestamp
        * --to - timestamp
    """
    # pylint: disable=too-many-branches
    del lib
    modifiers.ensure_only_supported("--force", "--from", "--to")
    if len(argv) != 1:
        raise CmdLineInputError()

    outfile = argv[0]
    dest_outfile = outfile + ".tar.bz2"
    if os.path.exists(dest_outfile):
        if modifiers.get("--force"):
            try:
                os.remove(dest_outfile)
            except OSError as e:
                utils.err(
                    f"Unable to remove {dest_outfile}: {format_os_error(e)}"
                )
        else:
            utils.err(
                dest_outfile + " already exists, use --force to overwrite"
            )

    # Assemble crm_report options: -f <from> [-t <to>]; without --from,
    # default the start of the report to yesterday.
    crm_report_opts = ["-f"]
    if modifiers.is_specified("--from"):
        crm_report_opts.append(str(modifiers.get("--from")))
        if modifiers.is_specified("--to"):
            crm_report_opts.extend(["-t", str(modifiers.get("--to"))])
    else:
        yesterday = datetime.datetime.now() - datetime.timedelta(1)
        crm_report_opts.append(yesterday.strftime("%Y-%m-%d %H:%M"))
    crm_report_opts.append(outfile)

    output, retval = utils.run([settings.crm_report_exec] + crm_report_opts)
    if retval != 0 and (
        "ERROR: Cannot determine nodes; specify --nodes or --single-node"
        in output
    ):
        utils.err("cluster is not configured on this node")

    # Filter crm_report noise out of the output before showing it.
    skip_markers = (
        "We will attempt to remove",
        "-p option",
        "However, doing",
        "to diagnose",
    )
    kept_lines = []
    for line in output.split("\n"):
        if line.startswith(("cat:", "grep", "tail")):
            continue
        if any(marker in line for marker in skip_markers):
            continue
        kept_lines.append(line.replace("--dest", "<dest>"))
    newoutput = "".join(part + "\n" for part in kept_lines)
    if retval != 0:
        utils.err(newoutput)
    print_to_stderr(newoutput)
1512
1513
def send_local_configs(
    node_name_list: StringIterable,
    clear_local_cluster_permissions: bool = False,
    force: bool = False,
) -> list[str]:
    """
    Distribute local pcsd configuration files to the given nodes.

    Returns a list of error messages; an empty list means full success.

    Commandline options:
        * --request-timeout - timeout of HTTP requests
    """
    err_msgs: list[str] = []
    output, retval = utils.run_pcsdcli(
        "send_local_configs",
        {
            "nodes": node_name_list,
            "force": force,
            "clear_local_cluster_permissions": (
                clear_local_cluster_permissions
            ),
        },
    )
    if retval != 0 or output["status"] != "ok" or not output["data"]:
        err_msgs.append("Unable to set pcsd configs")
        return err_msgs

    try:
        # Inspect the per-node results reported by pcsd.
        for node_name in node_name_list:
            node_status = output["data"][node_name]["status"]
            if node_status == "notauthorized":
                err_msgs.append(
                    (
                        "Unable to authenticate to {0}, try running 'pcs "
                        "host auth {0}'"
                    ).format(node_name)
                )
            if node_status not in ["ok", "not_supported"]:
                err_msgs.append(
                    "Unable to set pcsd configs on {0}".format(node_name)
                )
    # Any unexpected response shape means we could not talk to pcsd.
    # pylint: disable=bare-except
    except:  # noqa: E722
        err_msgs.append("Unable to communicate with pcsd")
    return err_msgs
1551
1552
def cluster_auth_cmd(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:  # noqa: PLR0912
    """
    Check which cluster nodes are authenticated against pcsd and
    interactively authenticate the missing ones; when all nodes are already
    authenticated, push local pcsd config files to them instead.

    Options:
        * --corosync_conf - corosync.conf file
        * --request-timeout - timeout of HTTP requests
        * -u - username
        * -p - password
    """
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-locals
    del lib
    modifiers.ensure_only_supported(
        "--corosync_conf", "--request-timeout", "-u", "-p"
    )
    if argv:
        raise CmdLineInputError()
    lib_env = utils.get_lib_env()
    target_factory = lib_env.get_node_target_factory()
    cluster_node_list = lib_env.get_corosync_conf().get_nodes()
    # Nodes without a name in corosync.conf cannot be addressed here.
    cluster_node_names = []
    missing_name = False
    for node in cluster_node_list:
        if node.name:
            cluster_node_names.append(node.name)
        else:
            missing_name = True
    if missing_name:
        warn(
            "Skipping nodes which do not have their name defined in "
            "corosync.conf, use the 'pcs host auth' command to authenticate "
            "them"
        )
    # Nodes with no known host entry are unauthorized right away; the rest
    # get their authentication verified over the network.
    target_list = []
    not_authorized_node_name_list = []
    for node_name in cluster_node_names:
        try:
            target_list.append(target_factory.get_target(node_name))
        except HostNotFound:
            print_to_stderr("{}: Not authorized".format(node_name))
            not_authorized_node_name_list.append(node_name)
    com_cmd = CheckAuth(lib_env.report_processor)
    com_cmd.set_targets(target_list)
    not_authorized_node_name_list.extend(
        run_and_raise(lib_env.get_node_communicator(), com_cmd)
    )
    if not_authorized_node_name_list:
        print(
            "Nodes to authorize: {}".format(
                ", ".join(not_authorized_node_name_list)
            )
        )
        username, password = utils.get_user_and_pass()
        # Collect the corosync node objects for the unauthorized names;
        # nodes without any address cannot be authenticated from here.
        not_auth_node_list = []
        for node_name in not_authorized_node_name_list:
            for node in cluster_node_list:
                if node.name == node_name:
                    if node.addrs_plain():
                        not_auth_node_list.append(node)
                    else:
                        print_to_stderr(
                            f"{node.name}: No addresses defined in "
                            "corosync.conf, use the 'pcs host auth' command to "
                            "authenticate the node"
                        )
        # Authenticate against the first address of each node on the
        # default pcsd port.
        nodes_to_auth_data = {
            node.name: dict(
                username=username,
                password=password,
                dest_list=[
                    dict(
                        addr=node.addrs_plain()[0],
                        port=settings.pcsd_default_port,
                    )
                ],
            )
            for node in not_auth_node_list
        }
        utils.auth_hosts(nodes_to_auth_data)
    else:
        # Everything is authenticated; just sync pcsd config files.
        print_to_stderr("Sending cluster config files to the nodes...")
        msgs = send_local_configs(cluster_node_names, force=True)
        for msg in msgs:
            warn(msg)
1636
1637
def _parse_node_options(
    node: str,
    options: Argv,
    additional_options: StringCollection = (),
    additional_repeatable_options: StringCollection = (),
) -> dict[str, Union[str, list[str]]]:
    """
    Parse 'key=value' node options into a dict describing one node.

    node -- node name
    options -- key=value strings belonging to the node
    additional_options -- option names allowed besides 'addr'
    additional_repeatable_options -- option names allowed to repeat

    Commandline options: no options
    """
    ADDR_OPT_KEYWORD = "addr"  # pylint: disable=invalid-name
    allowed = set(additional_options) | {ADDR_OPT_KEYWORD}
    repeatable = set(additional_repeatable_options) | {ADDR_OPT_KEYWORD}
    parser = KeyValueParser(options, repeatable)
    unique_opts = parser.get_unique()
    repeatable_opts = parser.get_repeatable()
    unknown = (set(unique_opts.keys()) | set(repeatable_opts)) - allowed
    if unknown:
        raise CmdLineInputError(
            f"Unknown options {format_list(unknown)} for node '{node}'"
        )
    unique_opts["name"] = node
    # Rename the repeatable "addr" key to the "addrs" key the lib expects.
    if ADDR_OPT_KEYWORD in repeatable_opts:
        repeatable_opts["addrs"] = repeatable_opts.pop(ADDR_OPT_KEYWORD)
    return unique_opts | repeatable_opts
1665
1666
# Keyword starting the transport section of the "pcs cluster setup" cli.
TRANSPORT_KEYWORD = "transport"
# Internal section name for transport options given before any keyword.
TRANSPORT_DEFAULT_SECTION = "__default__"
# Keyword starting one link options group within the transport section.
LINK_KEYWORD = "link"
1670
1671
def _parse_transport(
    transport_args: Argv,
) -> tuple[str, dict[str, Union[dict[str, str], list[dict[str, str]]]]]:
    """
    Split transport cli arguments into transport type and option sections.

    Returns (transport_type, sections) where sections maps each section
    name to its options dict, except LINK_KEYWORD which maps to a list of
    per-link option dicts.

    Commandline options: no options
    """
    if not transport_args:
        raise CmdLineInputError(
            f"{TRANSPORT_KEYWORD.capitalize()} type not defined"
        )
    transport_type = transport_args[0]
    remaining_args = transport_args[1:]

    keywords = {"compression", "crypto", LINK_KEYWORD}
    grouped = parse_args.group_by_keywords(
        remaining_args,
        keywords,
        implicit_first_keyword=TRANSPORT_DEFAULT_SECTION,
    )
    sections: dict[str, Union[dict[str, str], list[dict[str, str]]]] = {}
    for section in keywords | {TRANSPORT_DEFAULT_SECTION}:
        if section == LINK_KEYWORD:
            # Links are handled below; they may repeat.
            continue
        sections[section] = KeyValueParser(
            grouped.get_args_flat(section)
        ).get_unique()
    sections[LINK_KEYWORD] = [
        KeyValueParser(link_args).get_unique()
        for link_args in grouped.get_args_groups(LINK_KEYWORD)
    ]

    return transport_type, sections
1703
1704
def cluster_setup(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Set up a new cluster, either live on the nodes or locally into a file.

    Options:
        * --wait - only effective when used with --start
        * --start - start cluster
        * --enable - enable cluster
        * --force - some validation issues and unresolvable addresses are
          treated as warnings
        * --no-keys-sync - do not create and distribute pcsd ssl cert and key,
          corosync and pacemaker authkeys
        * --no-cluster-uuid - do not generate a cluster UUID during setup
        * --corosync_conf - corosync.conf file path, do not talk to cluster
          nodes
    """
    # pylint: disable=too-many-locals
    # Local mode only generates corosync.conf, so live-only options are
    # rejected below.
    is_local = modifiers.is_specified("--corosync_conf")

    allowed_options_common = ["--force", "--no-cluster-uuid"]
    allowed_options_live = [
        "--wait",
        "--start",
        "--enable",
        "--no-keys-sync",
    ]
    allowed_options_local = ["--corosync_conf", "--overwrite"]
    modifiers.ensure_only_supported(
        *(
            allowed_options_common
            + allowed_options_live
            + allowed_options_local
        ),
    )
    if is_local and modifiers.is_specified_any(allowed_options_live):
        raise CmdLineInputError(
            f"Cannot specify any of {format_list(allowed_options_live)} "
            "when '--corosync_conf' is specified"
        )
    if not is_local and modifiers.is_specified("--overwrite"):
        raise CmdLineInputError(
            "Cannot specify '--overwrite' when '--corosync_conf' is not "
            "specified"
        )

    # At least a cluster name and one node are required.
    if len(argv) < 2:
        raise CmdLineInputError()
    cluster_name, *argv = argv
    keywords = [TRANSPORT_KEYWORD, "totem", "quorum"]
    parsed_args = parse_args.group_by_keywords(
        argv, keywords, implicit_first_keyword="nodes"
    )
    parsed_args.ensure_unique_keywords()
    # Node arguments come first (before any keyword); each node name is
    # followed by its own options.
    nodes = [
        _parse_node_options(node, options)
        for node, options in parse_args.split_list_by_any_keywords(
            parsed_args.get_args_flat("nodes"), "node name"
        ).items()
    ]

    transport_type = None
    transport_options: dict[
        str, Union[dict[str, str], list[dict[str, str]]]
    ] = {}

    if parsed_args.has_keyword(TRANSPORT_KEYWORD):
        transport_type, transport_options = _parse_transport(
            parsed_args.get_args_flat(TRANSPORT_KEYWORD)
        )

    force_flags = []
    if modifiers.get("--force"):
        force_flags.append(reports.codes.FORCE)

    totem_options = KeyValueParser(
        parsed_args.get_args_flat("totem")
    ).get_unique()
    quorum_options = KeyValueParser(
        parsed_args.get_args_flat("quorum")
    ).get_unique()

    if not is_local:
        # Live setup: talk to the cluster nodes.
        lib.cluster.setup(
            cluster_name,
            nodes,
            transport_type=transport_type,
            transport_options=transport_options.get(
                TRANSPORT_DEFAULT_SECTION, {}
            ),
            link_list=transport_options.get(LINK_KEYWORD, []),
            compression_options=transport_options.get("compression", {}),
            crypto_options=transport_options.get("crypto", {}),
            totem_options=totem_options,
            quorum_options=quorum_options,
            wait=modifiers.get("--wait"),
            start=modifiers.get("--start"),
            enable=modifiers.get("--enable"),
            no_keys_sync=modifiers.get("--no-keys-sync"),
            no_cluster_uuid=modifiers.is_specified("--no-cluster-uuid"),
            force_flags=force_flags,
        )
        return

    # Local mode: generate corosync.conf content and write it to the
    # requested path without contacting any nodes.
    corosync_conf_data = lib.cluster.setup_local(
        cluster_name,
        nodes,
        transport_type=transport_type,
        transport_options=transport_options.get(TRANSPORT_DEFAULT_SECTION, {}),
        link_list=transport_options.get(LINK_KEYWORD, []),
        compression_options=transport_options.get("compression", {}),
        crypto_options=transport_options.get("crypto", {}),
        totem_options=totem_options,
        quorum_options=quorum_options,
        no_cluster_uuid=modifiers.is_specified("--no-cluster-uuid"),
        force_flags=force_flags,
    )

    corosync_conf_file = pcs_file.RawFile(
        file_metadata.for_file_type(
            file_type_codes.COROSYNC_CONF, modifiers.get("--corosync_conf")
        )
    )
    overwrite = modifiers.is_specified("--overwrite")
    try:
        corosync_conf_file.write(corosync_conf_data, can_overwrite=overwrite)
    except pcs_file.FileAlreadyExists as e:
        utils.err(
            reports.messages.FileAlreadyExists(
                e.metadata.file_type_code,
                e.metadata.path,
            ).message
            + ", use --overwrite to overwrite existing file(s)"
        )
    except pcs_file.RawFileError as e:
        utils.err(
            reports.messages.FileIoError(
                e.metadata.file_type_code,
                e.action,
                e.reason,
                file_path=e.metadata.path,
            ).message
        )
1844
1845
def config_update(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Update corosync configuration options.

    Options:
        * --corosync_conf - corosync.conf file path, do not talk to cluster
          nodes
    """
    modifiers.ensure_only_supported("--corosync_conf")
    sections = ["transport", "compression", "crypto", "totem"]
    parsed_args = parse_args.group_by_keywords(argv, sections)
    # One KeyValueParser per section keyword.
    options = {
        section: KeyValueParser(
            parsed_args.get_args_flat(section)
        ).get_unique()
        for section in sections
    }

    if modifiers.is_specified("--corosync_conf"):
        # Local mode: update the given corosync.conf file in place.
        _corosync_conf_local_cmd_call(
            modifiers.get("--corosync_conf"),
            lambda corosync_conf_content: lib.cluster.config_update_local(
                corosync_conf_content,
                options["transport"],
                options["compression"],
                options["crypto"],
                options["totem"],
            ),
        )
    else:
        lib.cluster.config_update(
            options["transport"],
            options["compression"],
            options["crypto"],
            options["totem"],
        )
1889
1890
def _format_options(label: str, options: Mapping[str, str]) -> list[str]:
    """Render an options mapping as a labeled, indented, sorted listing."""
    if not options:
        return []
    rendered = [f"{opt}: {val}" for opt, val in sorted(options.items())]
    return [f"{label}:"] + indent(rendered)
1899
1900
def _format_nodes(nodes: Iterable[CorosyncNodeDto]) -> list[str]:
    """Render corosync nodes as an indented listing sorted by node name."""
    lines = ["Nodes:"]
    for node in sorted(nodes, key=lambda item: item.name):
        # Addresses sorted by link number, followed by the node id.
        attr_lines = [
            f"Link {addr.link} address: {addr.addr}"
            for addr in sorted(node.addrs, key=lambda item: item.link)
        ]
        attr_lines.append(f"nodeid: {node.nodeid}")
        lines.extend(indent([f"{node.name}:"] + indent(attr_lines)))
    return lines
1910
1911
def config_show(
    lib: Any, argv: Argv, modifiers: parse_args.InputModifiers
) -> None:
    """
    Print the corosync configuration in the requested output format.

    Options:
        * --corosync_conf - corosync.conf file path, do not talk to cluster
          nodes
        * --output-format - supported formats: text, cmd, json
    """
    modifiers.ensure_only_supported(
        "--corosync_conf", output_format_supported=True
    )
    if argv:
        raise CmdLineInputError()
    output_format = modifiers.get_output_format()
    corosync_conf_dto = lib.cluster.get_corosync_conf_struct()
    if output_format == OUTPUT_FORMAT_VALUE_JSON:
        print(json.dumps(dto.to_dict(corosync_conf_dto)))
        return
    if output_format == OUTPUT_FORMAT_VALUE_CMD:
        if corosync_conf_dto.quorum_device is not None:
            # The cmd format cannot reproduce a qdevice setup yet.
            warn(
                "Quorum device configuration detected but not yet supported by "
                "this command."
            )
        print(" \\\n".join(_config_get_cmd(corosync_conf_dto)))
        return
    print("\n".join(_config_get_text(corosync_conf_dto)))
1939
1940
def _config_get_text(corosync_conf: CorosyncConfDto) -> list[str]:
    """
    Render the corosync configuration as human-readable text lines.
    """
    lines = [f"Cluster Name: {corosync_conf.cluster_name}"]
    if corosync_conf.cluster_uuid:
        lines.append(f"Cluster UUID: {corosync_conf.cluster_uuid}")
    lines.append(f"Transport: {corosync_conf.transport.lower()}")
    lines.extend(_format_nodes(corosync_conf.nodes))
    if corosync_conf.links_options:
        lines.append("Links:")
        # links_options maps a link number to that link's options.
        for linknum, link_options in sorted(
            corosync_conf.links_options.items()
        ):
            lines.extend(
                indent(_format_options(f"Link {linknum}", link_options))
            )

    # Each helper call yields nothing when the section is empty.
    lines.extend(
        _format_options("Transport Options", corosync_conf.transport_options)
    )
    lines.extend(
        _format_options(
            "Compression Options", corosync_conf.compression_options
        )
    )
    lines.extend(
        _format_options("Crypto Options", corosync_conf.crypto_options)
    )
    lines.extend(_format_options("Totem Options", corosync_conf.totem_options))
    lines.extend(
        _format_options("Quorum Options", corosync_conf.quorum_options)
    )
    if corosync_conf.quorum_device:
        # Quorum device section: generic, model and heuristics options.
        lines.append(f"Quorum Device: {corosync_conf.quorum_device.model}")
        lines.extend(
            indent(
                _format_options(
                    "Options", corosync_conf.quorum_device.generic_options
                )
            )
        )
        lines.extend(
            indent(
                _format_options(
                    "Model Options",
                    corosync_conf.quorum_device.model_options,
                )
            )
        )
        lines.extend(
            indent(
                _format_options(
                    "Heuristics",
                    corosync_conf.quorum_device.heuristics_options,
                )
            )
        )
    return lines
1997
1998
1999 def _corosync_node_to_cmd_line(node: CorosyncNodeDto) -> str:
2000 return " ".join(
2001 [node.name]
2002 + [
2003 f"addr={addr.addr}"
2004 for addr in sorted(node.addrs, key=lambda addr: addr.link)
2005 ]
2006 )
2007
2008
def _section_to_lines(
    options: Mapping[str, str], keyword: Optional[str] = None
) -> list[str]:
    """
    Render sorted 'key=value' lines for one section, indented one level,
    optionally preceded by the section keyword.
    """
    lines: list[str] = []
    if options:
        if keyword:
            lines.append(keyword)
        rendered = [f"{key}={val}" for key, val in sorted(options.items())]
        lines.extend(indent(rendered))
    return indent(lines)
2020
2021
def _config_get_cmd(corosync_conf: CorosyncConfDto) -> list[str]:
    """
    Render the corosync configuration as a 'pcs cluster setup' command,
    one argument group per line; the caller joins the lines.
    """
    lines = [f"pcs cluster setup {corosync_conf.cluster_name}"]
    # Nodes sorted by nodeid, one per line.
    lines += indent(
        [
            _corosync_node_to_cmd_line(node)
            for node in sorted(
                corosync_conf.nodes, key=lambda node: node.nodeid
            )
        ]
    )
    # NOTE(review): _config_get_text lowercases corosync_conf.transport
    # directly while this uses .value - confirm both forms are equivalent
    # for the transport field.
    transport = [
        "transport",
        str(corosync_conf.transport.value).lower(),
    ] + _section_to_lines(corosync_conf.transport_options)
    for _, link in sorted(corosync_conf.links_options.items()):
        transport.extend(_section_to_lines(link, "link"))
    transport.extend(
        _section_to_lines(corosync_conf.compression_options, "compression")
    )
    transport.extend(_section_to_lines(corosync_conf.crypto_options, "crypto"))
    lines.extend(indent(transport))
    lines.extend(_section_to_lines(corosync_conf.totem_options, "totem"))
    lines.extend(_section_to_lines(corosync_conf.quorum_options, "quorum"))
    if not corosync_conf.cluster_uuid:
        # The cluster has no UUID; reproduce that with --no-cluster-uuid.
        lines.extend(indent(["--no-cluster-uuid"]))
    return lines
2048
2049
def _parse_add_node(argv: Argv) -> dict[str, Union[str, list[str]]]:
    """
    Parse 'node add' arguments into a node description dict.

    The first argument is the node name; the rest are key=value options
    where 'addr' and 'device' may repeat and 'watchdog' may not.
    """
    DEVICE_KEYWORD = "device"  # pylint: disable=invalid-name
    WATCHDOG_KEYWORD = "watchdog"  # pylint: disable=invalid-name
    hostname, *node_options = argv
    node_dict = _parse_node_options(
        hostname,
        node_options,
        additional_options={DEVICE_KEYWORD, WATCHDOG_KEYWORD},
        additional_repeatable_options={DEVICE_KEYWORD},
    )
    # Rename the repeatable "device" key to the "devices" key the lib uses.
    if DEVICE_KEYWORD in node_dict:
        node_dict[f"{DEVICE_KEYWORD}s"] = node_dict.pop(DEVICE_KEYWORD)
    return node_dict
2064
2065
def node_add(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Add a new node to the local cluster.

    Options:
        * --wait - wait until new node will start up, effective only when
          --start is specified
        * --start - start new node
        * --enable - enable new node
        * --force - treat validation issues and not resolvable addresses as
          warnings instead of errors
        * --skip-offline - skip unreachable nodes
        * --no-watchdog-validation - do not validate watchdogs
        * --request-timeout - HTTP request timeout
    """
    modifiers.ensure_only_supported(
        "--wait",
        "--start",
        "--enable",
        "--force",
        "--skip-offline",
        "--no-watchdog-validation",
        "--request-timeout",
    )
    if not argv:
        raise CmdLineInputError()

    new_node = _parse_add_node(argv)
    force_flags = [
        getattr(reports.codes, flag_name)
        for option, flag_name in (
            ("--force", "FORCE"),
            ("--skip-offline", "SKIP_OFFLINE_NODES"),
        )
        if modifiers.get(option)
    ]
    lib.cluster.add_nodes(
        nodes=[new_node],
        wait=modifiers.get("--wait"),
        start=modifiers.get("--start"),
        enable=modifiers.get("--enable"),
        no_watchdog_validation=modifiers.get("--no-watchdog-validation"),
        force_flags=force_flags,
    )
2107
2108
def remove_nodes_from_cib(
    lib: Any, argv: Argv, modifiers: InputModifiers
) -> None:
    """
    Remove the given nodes from the CIB (pacemaker configuration) only.

    Options: no options
    """
    modifiers.ensure_only_supported()
    if not argv:
        raise CmdLineInputError("No nodes specified")
    lib.cluster.remove_nodes_from_cib(argv)
2119
2120
def link_add(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Add a corosync link.

    Options:
        * --force - treat validation issues and not resolvable addresses as
          warnings instead of errors
        * --skip-offline - skip unreachable nodes
        * --request-timeout - HTTP request timeout
    """
    modifiers.ensure_only_supported(
        "--force", "--request-timeout", "--skip-offline"
    )
    if not argv:
        raise CmdLineInputError()

    force_flags = [
        getattr(reports.codes, flag_name)
        for option, flag_name in (
            ("--force", "FORCE"),
            ("--skip-offline", "SKIP_OFFLINE_NODES"),
        )
        if modifiers.get(option)
    ]

    # Node addresses come first, link options after the "options" keyword.
    grouped = parse_args.group_by_keywords(
        argv, {"options"}, implicit_first_keyword="nodes"
    )
    grouped.ensure_unique_keywords()

    node_addrs = KeyValueParser(grouped.get_args_flat("nodes")).get_unique()
    link_options = KeyValueParser(
        grouped.get_args_flat("options")
    ).get_unique()
    lib.cluster.add_link(node_addrs, link_options, force_flags=force_flags)
2151
2152
def link_remove(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Remove corosync links given by their link numbers.

    Options:
      * --skip-offline - skip unreachable nodes
      * --request-timeout - HTTP request timeout
    """
    modifiers.ensure_only_supported("--request-timeout", "--skip-offline")
    if not argv:
        raise CmdLineInputError()

    force_flags = (
        [reports.codes.SKIP_OFFLINE_NODES]
        if modifiers.get("--skip-offline")
        else []
    )

    lib.cluster.remove_links(argv, force_flags=force_flags)
2168
2169
def link_update(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Update an existing corosync link.

    Options:
      * --force - treat validation issues and not resolvable addresses as
        warnings instead of errors
      * --skip-offline - skip unreachable nodes
      * --request-timeout - HTTP request timeout
    """
    modifiers.ensure_only_supported(
        "--force", "--request-timeout", "--skip-offline"
    )
    # at least the link number and one node address / option are required
    if len(argv) < 2:
        raise CmdLineInputError()

    force_flags = [
        flag
        for option, flag in (
            ("--force", reports.codes.FORCE),
            ("--skip-offline", reports.codes.SKIP_OFFLINE_NODES),
        )
        if modifiers.get(option)
    ]

    linknumber = argv[0]
    groups = parse_args.group_by_keywords(
        argv[1:], {"options"}, implicit_first_keyword="nodes"
    )
    groups.ensure_unique_keywords()

    node_addrs = KeyValueParser(groups.get_args_flat("nodes")).get_unique()
    link_options = KeyValueParser(groups.get_args_flat("options")).get_unique()
    lib.cluster.update_link(
        linknumber, node_addrs, link_options, force_flags=force_flags
    )
2202
2203
def generate_uuid(lib: Any, argv: Argv, modifiers: InputModifiers) -> None:
    """
    Generate a new cluster UUID.

    Options:
      * --force - allow to rewrite an existing UUID in corosync.conf
      * --corosync_conf - corosync.conf file path, do not talk to cluster nodes
    """
    modifiers.ensure_only_supported("--force", "--corosync_conf")
    if argv:
        raise CmdLineInputError()

    force_flags = [reports.codes.FORCE] if modifiers.get("--force") else []

    if modifiers.is_specified("--corosync_conf"):
        # operate on the given file instead of the live cluster
        _corosync_conf_local_cmd_call(
            modifiers.get("--corosync_conf"),
            lambda conf_content: lib.cluster.generate_cluster_uuid_local(
                conf_content, force_flags=force_flags
            ),
        )
    else:
        lib.cluster.generate_cluster_uuid(force_flags=force_flags)
2228