1    	/*
2    	 * Copyright (C) 2016-2025 Red Hat, Inc.  All rights reserved.
3    	 *
4    	 * Authors: Fabio M. Di Nitto <fabbione@kronosnet.org>
5    	 *          Federico Simoncelli <fsimon@kronosnet.org>
6    	 *
7    	 * This software licensed under LGPL-2.0+
8    	 */
9    	
10   	#include "config.h"
11   	
12   	#include <pthread.h>
13   	#include <errno.h>
14   	#include <string.h>
15   	
16   	#include "internals.h"
17   	#include "logging.h"
18   	#include "threads_common.h"
19   	
20   	int shutdown_in_progress(knet_handle_t knet_h)
21   	{
22   		int savederrno = 0;
23   		int ret;
24   	
25   		savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
26   		if (savederrno) {
27   			log_err(knet_h, KNET_SUB_COMMON, "Unable to get read lock: %s",
28   				strerror(savederrno));
29   			errno = savederrno;
30   			return -1;
31   		}
32   	
33   		ret = knet_h->fini_in_progress;
34   	
35   		pthread_rwlock_unlock(&knet_h->global_rwlock);
36   	
37   		return ret;
38   	}
39   	
40   	static int _pmtud_reschedule(knet_handle_t knet_h)
41   	{
42   		if (knet_h->pmtud_running) {
43   			knet_h->pmtud_abort = 1;
44   	
45   			if (knet_h->pmtud_waiting) {
46   				pthread_cond_signal(&knet_h->pmtud_cond);
47   			}
48   		}
49   		return 0;
50   	}
51   	
52   	static int pmtud_reschedule(knet_handle_t knet_h)
53   	{
54   		int res;
55   	
(1) Event lock: "pthread_mutex_lock" locks "knet_h->pmtud_mutex".
(2) Event cond_false: Condition "pthread_mutex_lock(&knet_h->pmtud_mutex) != 0", taking false branch.
56   		if (pthread_mutex_lock(&knet_h->pmtud_mutex) != 0) {
57   			log_debug(knet_h, KNET_SUB_PMTUD, "Unable to get mutex lock");
58   			return -1;
(3) Event if_end: End of if statement.
59   		}
60   		res = _pmtud_reschedule(knet_h);
61   		pthread_mutex_unlock(&knet_h->pmtud_mutex);
62   		return res;
63   	}
64   	
65   	int get_global_wrlock(knet_handle_t knet_h)
66   	{
(1) Event lock: "pmtud_reschedule" locks "knet_h->pmtud_mutex". [details]
(2) Event cond_false: Condition "pmtud_reschedule(knet_h) < 0", taking false branch.
67   		if (pmtud_reschedule(knet_h) < 0) {
68   			log_info(knet_h, KNET_SUB_PMTUD, "Unable to notify PMTUd to reschedule. Expect delays in executing API calls");
(3) Event if_end: End of if statement.
69   		}
70   		return pthread_rwlock_wrlock(&knet_h->global_rwlock);
71   	}
72   	
/*
 * Human-readable labels for each worker thread id, looked up linearly
 * by get_thread_name() for log messages. The SCTP entries exist only
 * when SCTP support is compiled in.
 */
static struct pretty_names thread_names[KNET_THREAD_MAX] =
{
	{ "TX", KNET_THREAD_TX },
	{ "RX", KNET_THREAD_RX },
	{ "HB", KNET_THREAD_HB },
	{ "PMTUD", KNET_THREAD_PMTUD },
#ifdef HAVE_NETINET_SCTP_H
	{ "SCTP_LISTEN", KNET_THREAD_SCTP_LISTEN },
	{ "SCTP_CONN", KNET_THREAD_SCTP_CONN },
#endif
	{ "DST_LINK", KNET_THREAD_DST_LINK }
};
85   	
/*
 * Human-readable labels for each thread lifecycle state, looked up
 * linearly by get_thread_status_name() for log messages.
 */
static struct pretty_names thread_status[] =
{
	{ "unregistered", KNET_THREAD_UNREGISTERED },
	{ "registered", KNET_THREAD_REGISTERED },
	{ "started", KNET_THREAD_STARTED },
	{ "stopped", KNET_THREAD_STOPPED }
};
93   	
94   	static const char *get_thread_status_name(uint8_t status)
95   	{
96   		unsigned int i;
97   	
98   		for (i = 0; i < KNET_THREAD_STATUS_MAX; i++) {
99   			if (thread_status[i].val == status) {
100  				return thread_status[i].name;
101  			}
102  		}
103  		return "unknown";
104  	}
105  	
106  	static const char *get_thread_name(uint8_t thread_id)
107  	{
108  		unsigned int i;
109  	
110  		for (i = 0; i < KNET_THREAD_MAX; i++) {
111  			if (thread_names[i].val == thread_id) {
112  				return thread_names[i].name;
113  			}
114  		}
115  		return "unknown";
116  	}
117  	
118  	int get_thread_flush_queue(knet_handle_t knet_h, uint8_t thread_id)
119  	{
120  		uint8_t flush;
121  	
122  		if (pthread_mutex_lock(&knet_h->threads_status_mutex) != 0) {
123  			log_debug(knet_h, KNET_SUB_HANDLE, "Unable to get mutex lock");
124  			return -1;
125  		}
126  	
127  		flush = knet_h->threads_flush_queue[thread_id];
128  	
129  		pthread_mutex_unlock(&knet_h->threads_status_mutex);
130  		return flush;
131  	}
132  	
133  	int set_thread_flush_queue(knet_handle_t knet_h, uint8_t thread_id, uint8_t status)
134  	{
135  		if (pthread_mutex_lock(&knet_h->threads_status_mutex) != 0) {
136  			log_debug(knet_h, KNET_SUB_HANDLE, "Unable to get mutex lock");
137  			return -1;
138  		}
139  	
140  		knet_h->threads_flush_queue[thread_id] = status;
141  	
142  		log_debug(knet_h, KNET_SUB_HANDLE, "Updated flush queue request for thread %s to %u",
143  			  get_thread_name(thread_id), status);
144  	
145  		pthread_mutex_unlock(&knet_h->threads_status_mutex);
146  		return 0;
147  	}
148  	
149  	int wait_all_threads_flush_queue(knet_handle_t knet_h)
150  	{
151  		uint8_t i = 0, found = 0;
152  	
153  		while (!found) {
154  			usleep(KNET_THREADS_TIMERES);
155  	
156  			if (pthread_mutex_lock(&knet_h->threads_status_mutex) != 0) {
157  				continue;
158  			}
159  	
160  			found = 1;
161  	
162  			for (i = 0; i < KNET_THREAD_MAX; i++) {
163  				if (knet_h->threads_flush_queue[i] == KNET_THREAD_QUEUE_FLUSHED) {
164  					continue;
165  				}
166  				log_debug(knet_h, KNET_SUB_HANDLE, "Checking thread: %s queue: %u",
167  						get_thread_name(i),
168  						knet_h->threads_flush_queue[i]);
169  				if (knet_h->threads_flush_queue[i] != KNET_THREAD_QUEUE_FLUSHED) {
170  					found = 0;
171  				}
172  			}
173  	
174  			pthread_mutex_unlock(&knet_h->threads_status_mutex);
175  		}
176  	
177  		return 0;
178  	}
179  	
180  	int set_thread_status(knet_handle_t knet_h, uint8_t thread_id, uint8_t status)
181  	{
182  		if (pthread_mutex_lock(&knet_h->threads_status_mutex) != 0) {
183  			log_debug(knet_h, KNET_SUB_HANDLE, "Unable to get mutex lock");
184  			return -1;
185  		}
186  	
187  		knet_h->threads_status[thread_id] = status;
188  	
189  		log_debug(knet_h, KNET_SUB_HANDLE, "Updated status for thread %s to %s",
190  			  get_thread_name(thread_id), get_thread_status_name(status));
191  	
192  		pthread_mutex_unlock(&knet_h->threads_status_mutex);
193  		return 0;
194  	}
195  	
196  	int wait_all_threads_status(knet_handle_t knet_h, uint8_t status)
197  	{
198  		uint8_t i = 0, found = 0;
199  	
200  		while (!found) {
201  			usleep(KNET_THREADS_TIMERES);
202  	
203  			if (pthread_mutex_lock(&knet_h->threads_status_mutex) != 0) {
204  				continue;
205  			}
206  	
207  			found = 1;
208  	
209  			for (i = 0; i < KNET_THREAD_MAX; i++) {
210  				if (knet_h->threads_status[i] == KNET_THREAD_UNREGISTERED) {
211  					continue;
212  				}
213  				log_debug(knet_h, KNET_SUB_HANDLE, "Checking thread: %s status: %s req: %s",
214  						get_thread_name(i),
215  						get_thread_status_name(knet_h->threads_status[i]),
216  						get_thread_status_name(status));
217  				if (knet_h->threads_status[i] != status) {
218  					found = 0;
219  				}
220  			}
221  	
222  			pthread_mutex_unlock(&knet_h->threads_status_mutex);
223  		}
224  	
225  		return 0;
226  	}
227  	
/*
 * Request an asynchronous PMTUd run.
 *
 * reset_mtu:      if set, drop data_mtu back to the minimum via
 *                 calc_min_mtu() and fire the pmtud_notify callback.
 * force_restart:  if set and PMTUd is currently running, abort the
 *                 in-flight run so it restarts from scratch.
 *
 * No return value: notification is best-effort (see trylock note below).
 */
void force_pmtud_run(knet_handle_t knet_h, uint8_t subsystem, uint8_t reset_mtu, uint8_t force_restart)
{
	if (reset_mtu) {
		log_debug(knet_h, subsystem, "PMTUd has been reset to default");
		knet_h->data_mtu = calc_min_mtu(knet_h);
		/* tell the application about the MTU change, if it asked to know */
		if (knet_h->pmtud_notify_fn) {
			knet_h->pmtud_notify_fn(knet_h->pmtud_notify_fn_private_data,
						knet_h->data_mtu);
		}
	}

	/*
	 * we can only try to take a lock here. This part of the code
	 * can be invoked by any thread, including PMTUd that is already
	 * holding a lock at that stage.
	 * If PMTUd is holding the lock, most likely it is already running
	 * and we don't need to notify it back.
	 */
	if (!pthread_mutex_trylock(&knet_h->pmtud_mutex)) {
		if (!knet_h->pmtud_running) {
			/* idle: flag a rerun for the next PMTUd wakeup */
			if (!knet_h->pmtud_forcerun) {
				log_debug(knet_h, subsystem, "Notifying PMTUd to rerun");
				knet_h->pmtud_forcerun = 1;
			}
		} else {
			/* already running: only interrupt it if the caller insists */
			if (force_restart) {
				if (_pmtud_reschedule(knet_h) < 0) {
					log_info(knet_h, KNET_SUB_PMTUD, "Unable to notify PMTUd to reschedule. A joining node may struggle to connect properly");
				}
			}
		}
		pthread_mutex_unlock(&knet_h->pmtud_mutex);
	}
}
262