1 /*
2 * Copyright (C) 2010-2025 Red Hat, Inc. All rights reserved.
3 *
4 * Authors: Fabio M. Di Nitto <fabbione@kronosnet.org>
5 * Federico Simoncelli <fsimon@kronosnet.org>
6 *
7 * This software licensed under LGPL-2.0+
8 */
9
10 #include "config.h"
11
12 #include <stdlib.h>
13 #include <string.h>
14 #include <errno.h>
15 #include <pthread.h>
16 #include <stdio.h>
17
18 #include "host.h"
19 #include "internals.h"
20 #include "logging.h"
21 #include "threads_common.h"
22
23 static void _host_list_update(knet_handle_t knet_h)
24 {
25 struct knet_host *host;
26 knet_h->host_ids_entries = 0;
27
28 for (host = knet_h->host_head; host != NULL; host = host->next) {
29 knet_h->host_ids[knet_h->host_ids_entries] = host->host_id;
30 knet_h->host_ids_entries++;
31 }
32 }
33
34 int knet_host_add(knet_handle_t knet_h, knet_node_id_t host_id)
35 {
36 int savederrno = 0, err = 0;
37 struct knet_host *host = NULL;
38 uint8_t link_idx;
39
40 if (!_is_valid_handle(knet_h)) {
41 return -1;
42 }
43
44 savederrno = get_global_wrlock(knet_h);
45 if (savederrno) {
46 log_err(knet_h, KNET_SUB_HOST, "Unable to get write lock: %s",
47 strerror(savederrno));
48 errno = savederrno;
49 return -1;
50 }
51
52 if (knet_h->host_index[host_id]) {
53 err = -1;
54 savederrno = EEXIST;
55 log_err(knet_h, KNET_SUB_HOST, "Unable to add host %u: %s",
56 host_id, strerror(savederrno));
57 goto exit_unlock;
58 }
59
60 host = malloc(sizeof(struct knet_host));
61 if (!host) {
62 err = -1;
63 savederrno = errno;
64 log_err(knet_h, KNET_SUB_HOST, "Unable to allocate memory for host %u: %s",
65 host_id, strerror(savederrno));
66 goto exit_unlock;
67 }
68
69 memset(host, 0, sizeof(struct knet_host));
70
71 /*
72 * set host_id
73 */
74 host->host_id = host_id;
75
76 /*
77 * set default host->name to host_id for logging
78 */
79 snprintf(host->name, KNET_MAX_HOST_LEN, "%u", host_id);
80
81 /*
82 * initialize links internal data
83 */
84 for (link_idx = 0; link_idx < KNET_MAX_LINK; link_idx++) {
85 host->link[link_idx].link_id = link_idx;
86 host->link[link_idx].status.stats.latency_min = UINT32_MAX;
87 }
88
89 /*
90 * add new host to the index
91 */
92 knet_h->host_index[host_id] = host;
93
94 /*
95 * add new host to host list
96 */
97 if (knet_h->host_head) {
98 host->next = knet_h->host_head;
99 }
100 knet_h->host_head = host;
101
102 _host_list_update(knet_h);
103
104 exit_unlock:
105 pthread_rwlock_unlock(&knet_h->global_rwlock);
106 if (err < 0) {
107 free(host);
108 }
109 errno = err ? savederrno : 0;
110 return err;
111 }
112
113 int knet_host_remove(knet_handle_t knet_h, knet_node_id_t host_id)
114 {
115 int savederrno = 0, err = 0;
116 struct knet_host *host, *removed;
117 uint8_t link_idx;
118
|
(1) Event cond_false: |
Condition "!_is_valid_handle(knet_h)", taking false branch. |
119 if (!_is_valid_handle(knet_h)) {
120 return -1;
|
(2) Event if_end: |
End of if statement. |
121 }
122
123 savederrno = get_global_wrlock(knet_h);
|
(3) Event cond_false: |
Condition "savederrno", taking false branch. |
124 if (savederrno) {
125 log_err(knet_h, KNET_SUB_HOST, "Unable to get write lock: %s",
126 strerror(savederrno));
127 errno = savederrno;
128 return -1;
|
(4) Event if_end: |
End of if statement. |
129 }
130
131 host = knet_h->host_index[host_id];
132
|
(5) Event cond_false: |
Condition "!host", taking false branch. |
133 if (!host) {
134 err = -1;
135 savederrno = EINVAL;
136 log_err(knet_h, KNET_SUB_HOST, "Unable to remove host %u: %s",
137 host_id, strerror(savederrno));
138 goto exit_unlock;
|
(6) Event if_end: |
End of if statement. |
139 }
140
141 /*
142 * if links are configured we cannot release the host
143 */
144
|
(7) Event cond_true: |
Condition "link_idx < 8", taking true branch. |
|
(11) Event loop_begin: |
Jumped back to beginning of loop. |
|
(12) Event cond_true: |
Condition "link_idx < 8", taking true branch. |
|
(16) Event loop_begin: |
Jumped back to beginning of loop. |
|
(17) Event cond_false: |
Condition "link_idx < 8", taking false branch. |
145 for (link_idx = 0; link_idx < KNET_MAX_LINK; link_idx++) {
|
(8) Event cond_false: |
Condition "host->link[link_idx].configured", taking false branch. |
|
(13) Event cond_false: |
Condition "host->link[link_idx].configured", taking false branch. |
146 if (host->link[link_idx].configured) {
147 err = -1;
148 savederrno = EBUSY;
149 log_err(knet_h, KNET_SUB_HOST, "Unable to remove host %u, links are still configured: %s",
150 host_id, strerror(savederrno));
151 goto exit_unlock;
|
(9) Event if_end: |
End of if statement. |
|
(14) Event if_end: |
End of if statement. |
152 }
|
(10) Event loop: |
Jumping back to the beginning of the loop. |
|
(15) Event loop: |
Jumping back to the beginning of the loop. |
|
(18) Event loop_end: |
Reached end of loop. |
153 }
154
155 removed = NULL;
156
157 /*
158 * removing host from list
159 */
|
(19) Event cond_true: |
Condition "knet_h->host_head->host_id == host_id", taking true branch. |
160 if (knet_h->host_head->host_id == host_id) {
161 removed = knet_h->host_head;
162 knet_h->host_head = removed->next;
163 } else {
164 for (host = knet_h->host_head; host->next != NULL; host = host->next) {
165 if (host->next->host_id == host_id) {
166 removed = host->next;
167 host->next = removed->next;
168 break;
169 }
170 }
171 }
172
173 knet_h->host_index[host_id] = NULL;
174 free(removed);
175
176 _host_list_update(knet_h);
177
178 exit_unlock:
179 pthread_rwlock_unlock(&knet_h->global_rwlock);
180 errno = err ? savederrno : 0;
181 return err;
182 }
183
184 int knet_host_set_name(knet_handle_t knet_h, knet_node_id_t host_id, const char *name)
185 {
186 int savederrno = 0, err = 0;
187 struct knet_host *host;
188
189 if (!_is_valid_handle(knet_h)) {
190 return -1;
191 }
192
193 savederrno = get_global_wrlock(knet_h);
194 if (savederrno) {
195 log_err(knet_h, KNET_SUB_HOST, "Unable to get write lock: %s",
196 strerror(savederrno));
197 errno = savederrno;
198 return -1;
199 }
200
201 if (!knet_h->host_index[host_id]) {
202 err = -1;
203 savederrno = EINVAL;
204 log_err(knet_h, KNET_SUB_HOST, "Unable to find host %u to set name: %s",
205 host_id, strerror(savederrno));
206 goto exit_unlock;
207 }
208
209 if (!name) {
210 err = -1;
211 savederrno = EINVAL;
212 log_err(knet_h, KNET_SUB_HOST, "Unable to set name for host %u: %s",
213 host_id, strerror(savederrno));
214 goto exit_unlock;
215 }
216
217 if (strlen(name) >= KNET_MAX_HOST_LEN) {
218 err = -1;
219 savederrno = EINVAL;
220 log_err(knet_h, KNET_SUB_HOST, "Requested name for host %u is too long: %s",
221 host_id, strerror(savederrno));
222 goto exit_unlock;
223 }
224
225 for (host = knet_h->host_head; host != NULL; host = host->next) {
226 if (!strncmp(host->name, name, KNET_MAX_HOST_LEN)) {
227 err = -1;
228 savederrno = EEXIST;
229 log_err(knet_h, KNET_SUB_HOST, "Duplicated name found on host_id %u",
230 host->host_id);
231 goto exit_unlock;
232 }
233 }
234
235 snprintf(knet_h->host_index[host_id]->name, KNET_MAX_HOST_LEN, "%s", name);
236
237 exit_unlock:
238 pthread_rwlock_unlock(&knet_h->global_rwlock);
239 errno = err ? savederrno : 0;
240 return err;
241 }
242
243 int knet_host_get_name_by_host_id(knet_handle_t knet_h, knet_node_id_t host_id,
244 char *name)
245 {
246 int savederrno = 0, err = 0;
247
248 if (!_is_valid_handle(knet_h)) {
249 return -1;
250 }
251
252 if (!name) {
253 errno = EINVAL;
254 return -1;
255 }
256
257 savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
258 if (savederrno) {
259 log_err(knet_h, KNET_SUB_HOST, "Unable to get read lock: %s",
260 strerror(savederrno));
261 errno = savederrno;
262 return -1;
263 }
264
265 if (!knet_h->host_index[host_id]) {
266 savederrno = EINVAL;
267 err = -1;
268 log_debug(knet_h, KNET_SUB_HOST, "Host %u not found", host_id);
269 goto exit_unlock;
270 }
271
272 snprintf(name, KNET_MAX_HOST_LEN, "%s", knet_h->host_index[host_id]->name);
273
274 exit_unlock:
275 pthread_rwlock_unlock(&knet_h->global_rwlock);
276 errno = err ? savederrno : 0;
277 return err;
278 }
279
280 int knet_host_get_id_by_host_name(knet_handle_t knet_h, const char *name,
281 knet_node_id_t *host_id)
282 {
283 int savederrno = 0, err = 0, found = 0;
284 struct knet_host *host;
285
286 if (!_is_valid_handle(knet_h)) {
287 return -1;
288 }
289
290 if (!name) {
291 errno = EINVAL;
292 return -1;
293 }
294
295 if (!host_id) {
296 errno = EINVAL;
297 return -1;
298 }
299
300 savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
301 if (savederrno) {
302 log_err(knet_h, KNET_SUB_HOST, "Unable to get read lock: %s",
303 strerror(savederrno));
304 errno = savederrno;
305 return -1;
306 }
307
308 for (host = knet_h->host_head; host != NULL; host = host->next) {
309 if (!strncmp(name, host->name, KNET_MAX_HOST_LEN)) {
310 found = 1;
311 *host_id = host->host_id;
312 break;
313 }
314 }
315
316 if (!found) {
317 savederrno = ENOENT;
318 err = -1;
319 }
320
321 pthread_rwlock_unlock(&knet_h->global_rwlock);
322 errno = err ? savederrno : 0;
323 return err;
324 }
325
326 int knet_host_get_host_list(knet_handle_t knet_h,
327 knet_node_id_t *host_ids, size_t *host_ids_entries)
328 {
329 int savederrno = 0;
330
331 if (!_is_valid_handle(knet_h)) {
332 return -1;
333 }
334
335 if ((!host_ids) || (!host_ids_entries)) {
336 errno = EINVAL;
337 return -1;
338 }
339
340 savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
341 if (savederrno) {
342 log_err(knet_h, KNET_SUB_HOST, "Unable to get read lock: %s",
343 strerror(savederrno));
344 errno = savederrno;
345 return -1;
346 }
347
348 memmove(host_ids, knet_h->host_ids, sizeof(knet_h->host_ids));
349 *host_ids_entries = knet_h->host_ids_entries;
350
351 pthread_rwlock_unlock(&knet_h->global_rwlock);
352 return 0;
353 }
354
355 int knet_host_set_policy(knet_handle_t knet_h, knet_node_id_t host_id,
356 uint8_t policy)
357 {
358 int savederrno = 0, err = 0;
359 uint8_t old_policy;
360
361 if (!_is_valid_handle(knet_h)) {
362 return -1;
363 }
364
365 if (policy > KNET_LINK_POLICY_RR) {
366 errno = EINVAL;
367 return -1;
368 }
369
370 savederrno = get_global_wrlock(knet_h);
371 if (savederrno) {
372 log_err(knet_h, KNET_SUB_HOST, "Unable to get write lock: %s",
373 strerror(savederrno));
374 errno = savederrno;
375 return -1;
376 }
377
378 if (!knet_h->host_index[host_id]) {
379 err = -1;
380 savederrno = EINVAL;
381 log_err(knet_h, KNET_SUB_HOST, "Unable to set name for host %u: %s",
382 host_id, strerror(savederrno));
383 goto exit_unlock;
384 }
385
386 old_policy = knet_h->host_index[host_id]->link_handler_policy;
387 knet_h->host_index[host_id]->link_handler_policy = policy;
388
389 if (_host_dstcache_update_async(knet_h, knet_h->host_index[host_id])) {
390 savederrno = errno;
391 err = -1;
392 knet_h->host_index[host_id]->link_handler_policy = old_policy;
393 log_debug(knet_h, KNET_SUB_HOST, "Unable to update switch cache for host %u: %s",
394 host_id, strerror(savederrno));
395 }
396
397 log_debug(knet_h, KNET_SUB_HOST, "Host %u has new switching policy: %u", host_id, policy);
398
399 exit_unlock:
400 pthread_rwlock_unlock(&knet_h->global_rwlock);
401 errno = err ? savederrno : 0;
402 return err;
403 }
404
405 int knet_host_get_policy(knet_handle_t knet_h, knet_node_id_t host_id,
406 uint8_t *policy)
407 {
408 int savederrno = 0, err = 0;
409
410 if (!_is_valid_handle(knet_h)) {
411 return -1;
412 }
413
414 if (!policy) {
415 errno = EINVAL;
416 return -1;
417 }
418
419 savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
420 if (savederrno) {
421 log_err(knet_h, KNET_SUB_HOST, "Unable to get read lock: %s",
422 strerror(savederrno));
423 errno = savederrno;
424 return -1;
425 }
426
427 if (!knet_h->host_index[host_id]) {
428 err = -1;
429 savederrno = EINVAL;
430 log_err(knet_h, KNET_SUB_HOST, "Unable to get name for host %u: %s",
431 host_id, strerror(savederrno));
432 goto exit_unlock;
433 }
434
435 *policy = knet_h->host_index[host_id]->link_handler_policy;
436
437 exit_unlock:
438 pthread_rwlock_unlock(&knet_h->global_rwlock);
439 errno = err ? savederrno : 0;
440 return err;
441 }
442
443 int knet_host_get_status(knet_handle_t knet_h, knet_node_id_t host_id,
444 struct knet_host_status *status)
445 {
446 int savederrno = 0, err = 0;
447 struct knet_host *host;
448
449 if (!_is_valid_handle(knet_h)) {
450 return -1;
451 }
452
453 if (!status) {
454 errno = EINVAL;
455 return -1;
456 }
457
458 savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
459 if (savederrno) {
460 log_err(knet_h, KNET_SUB_HOST, "Unable to get read lock: %s",
461 strerror(savederrno));
462 errno = savederrno;
463 return -1;
464 }
465
466 host = knet_h->host_index[host_id];
467 if (!host) {
468 err = -1;
469 savederrno = EINVAL;
470 log_err(knet_h, KNET_SUB_HOST, "Unable to find host %u: %s",
471 host_id, strerror(savederrno));
472 goto exit_unlock;
473 }
474
475 memmove(status, &host->status, sizeof(struct knet_host_status));
476
477 exit_unlock:
478 pthread_rwlock_unlock(&knet_h->global_rwlock);
479 errno = err ? savederrno : 0;
480 return err;
481 }
482
483 int knet_host_enable_status_change_notify(knet_handle_t knet_h,
484 void *host_status_change_notify_fn_private_data,
485 void (*host_status_change_notify_fn) (
486 void *private_data,
487 knet_node_id_t host_id,
488 uint8_t reachable,
489 uint8_t remote,
490 uint8_t external))
491 {
492 int savederrno = 0;
493
494 if (!_is_valid_handle(knet_h)) {
495 return -1;
496 }
497
498 savederrno = get_global_wrlock(knet_h);
499 if (savederrno) {
500 log_err(knet_h, KNET_SUB_HOST, "Unable to get write lock: %s",
501 strerror(savederrno));
502 errno = savederrno;
503 return -1;
504 }
505
506 knet_h->host_status_change_notify_fn_private_data = host_status_change_notify_fn_private_data;
507 knet_h->host_status_change_notify_fn = host_status_change_notify_fn;
508 if (knet_h->host_status_change_notify_fn) {
509 log_debug(knet_h, KNET_SUB_HOST, "host_status_change_notify_fn enabled");
510 } else {
511 log_debug(knet_h, KNET_SUB_HOST, "host_status_change_notify_fn disabled");
512 }
513
514 pthread_rwlock_unlock(&knet_h->global_rwlock);
515
516 errno = 0;
517 return 0;
518 }
519
520 static void _clear_cbuffers(struct knet_host *host, seq_num_t rx_seq_num)
521 {
522 int i;
523
524 memset(host->circular_buffer, 0, KNET_CBUFFER_SIZE);
525 host->rx_seq_num = rx_seq_num;
526
527 memset(host->circular_buffer_defrag, 0, KNET_CBUFFER_SIZE);
528
529 for (i = 0; i < KNET_DEFRAG_BUFFERS; i++) {
530 memset(&host->defrag_buf[i], 0, sizeof(struct knet_host_defrag_buf));
531 }
532 }
533
/*
 * Expire defrag buffers whose packet seq num has fallen outside the
 * window of packets that could still be completed around seq_num.
 * All arithmetic is on the unsigned seq_num_t type, so head/tail wrap
 * modulo the sequence space; the two comparison forms below handle
 * the wrapped and non-wrapped cases.
 */
static void _reclaim_old_defrag_bufs(struct knet_host *host, seq_num_t seq_num)
{
	seq_num_t head, tail; /* seq_num boundaries */
	int i;

	/* head: one past the newest seq num; tail: oldest reclaimable one */
	head = seq_num + 1;
	tail = seq_num - (KNET_DEFRAG_BUFFERS + 1);

	/*
	 * expire old defrag buffers
	 */
	for (i = 0; i < KNET_DEFRAG_BUFFERS; i++) {
		if (host->defrag_buf[i].in_use) {
			/*
			 * head has done a rollover to 0+
			 */
			if (tail > head) {
				/* stale seq nums form the contiguous range [head, tail] */
				if ((host->defrag_buf[i].pckt_seq >= head) && (host->defrag_buf[i].pckt_seq <= tail)) {
					host->defrag_buf[i].in_use = 0;
				}
			} else {
				/* stale range wraps around the end of the seq space */
				if ((host->defrag_buf[i].pckt_seq >= head) || (host->defrag_buf[i].pckt_seq <= tail)){
					host->defrag_buf[i].in_use = 0;
				}
			}
		}
	}
}
562
/*
 * check if a given packet seq num is in the circular buffers
 * defrag_buf = 0 -> use normal cbuf 1 -> use the defrag buffer lookup
 *
 * Returns 1 when the seq num has not been seen yet (slot is 0, or the
 * seq num falls outside the current window and the buffers are resynced),
 * 0 when it is a duplicate. clear_buf != 0 resets all buffers first.
 */

int _seq_num_lookup(struct knet_host *host, seq_num_t seq_num, int defrag_buf, int clear_buf)
{
	size_t head, tail; /* circular buffer indexes */
	seq_num_t seq_dist;
	char *dst_cbuf = host->circular_buffer;
	char *dst_cbuf_defrag = host->circular_buffer_defrag;
	seq_num_t *dst_seq_num = &host->rx_seq_num;

	/*
	 * There is a potential race condition where the sender
	 * is overloaded, sending data packets before pings
	 * can kick in and set the correct dst_seq_num.
	 *
	 * if this node is starting up (dst_seq_num = 0),
	 * it can start rejecting valid packets and get stuck.
	 *
	 * Set the dst_seq_num to the first seen packet and
	 * use that as reference instead.
	 */
	if (!*dst_seq_num) {
		*dst_seq_num = seq_num;
	}

	if (clear_buf) {
		_clear_cbuffers(host, seq_num);
	}

	_reclaim_old_defrag_bufs(host, *dst_seq_num);

	/*
	 * distance between the incoming seq num and the reference one,
	 * accounting for wraparound of the unsigned sequence space
	 */
	if (seq_num < *dst_seq_num) {
		seq_dist = (SEQ_MAX - seq_num) + *dst_seq_num;
	} else {
		seq_dist = *dst_seq_num - seq_num;
	}

	head = seq_num % KNET_CBUFFER_SIZE;

	if (seq_dist < KNET_CBUFFER_SIZE) { /* seq num is in ring buffer */
		if (!defrag_buf) {
			return (dst_cbuf[head] == 0) ? 1 : 0;
		} else {
			return (dst_cbuf_defrag[head] == 0) ? 1 : 0;
		}
	} else if (seq_dist <= SEQ_MAX - KNET_CBUFFER_SIZE) {
		/* seq num is far outside the window: resync both buffers */
		memset(dst_cbuf, 0, KNET_CBUFFER_SIZE);
		memset(dst_cbuf_defrag, 0, KNET_CBUFFER_SIZE);
		*dst_seq_num = seq_num;
	}

	/* cleaning up circular buffer */
	tail = (*dst_seq_num + 1) % KNET_CBUFFER_SIZE;

	if (tail > head) {
		memset(dst_cbuf + tail, 0, KNET_CBUFFER_SIZE - tail);
		memset(dst_cbuf, 0, head + 1);
		memset(dst_cbuf_defrag + tail, 0, KNET_CBUFFER_SIZE - tail);
		memset(dst_cbuf_defrag, 0, head + 1);
	} else {
		memset(dst_cbuf + tail, 0, head - tail + 1);
		memset(dst_cbuf_defrag + tail, 0, head - tail + 1);
	}

	*dst_seq_num = seq_num;

	return 1;
}
634
635 void _seq_num_set(struct knet_host *host, seq_num_t seq_num, int defrag_buf)
636 {
637 if (!defrag_buf) {
638 host->circular_buffer[seq_num % KNET_CBUFFER_SIZE] = 1;
639 } else {
640 host->circular_buffer_defrag[seq_num % KNET_CBUFFER_SIZE] = 1;
641 }
642
643 return;
644 }
645
646 int _host_dstcache_update_async(knet_handle_t knet_h, struct knet_host *host)
647 {
648 int savederrno = 0;
649 knet_node_id_t host_id = host->host_id;
650
651 if (sendto(knet_h->dstsockfd[1], &host_id, sizeof(host_id), MSG_DONTWAIT | MSG_NOSIGNAL, NULL, 0) != sizeof(host_id)) {
652 savederrno = errno;
653 log_debug(knet_h, KNET_SUB_HOST, "Unable to write to dstpipefd[1]: %s",
654 strerror(savederrno));
655 errno = savederrno;
656 return -1;
657 }
658
659 return 0;
660 }
661
/*
 * Recompute the set of active links for host according to its link
 * handler policy, update host->status.reachable, and invoke the status
 * change callback when reachability changed.
 * NOTE(review): callers presumably hold the global lock — confirm.
 *
 * Always returns 0.
 */
int _host_dstcache_update_sync(knet_handle_t knet_h, struct knet_host *host)
{
	int link_idx;
	int best_priority = -1;
	int reachable = 0;

	/*
	 * the local host with a configured loop link always has exactly
	 * one active link
	 */
	if (knet_h->host_id == host->host_id && knet_h->has_loop_link) {
		host->active_link_entries = 1;
		return 0;
	}

	host->active_link_entries = 0;
	for (link_idx = 0; link_idx < KNET_MAX_LINK; link_idx++) {
		if (host->link[link_idx].status.enabled != 1) /* link is not enabled */
			continue;
		if (host->link[link_idx].status.connected != 1) /* link is not connected */
			continue;
		if (host->link[link_idx].has_valid_mtu != 1) /* link does not have valid MTU */
			continue;

		if (host->link_handler_policy == KNET_LINK_POLICY_PASSIVE) {
			/* for passive we look for the only active link with higher priority */
			if (host->link[link_idx].priority > best_priority) {
				host->active_links[0] = link_idx;
				best_priority = host->link[link_idx].priority;
			}
			host->active_link_entries = 1;
		} else {
			/* for RR and ACTIVE we need to copy all available links */
			host->active_links[host->active_link_entries] = link_idx;
			host->active_link_entries++;
		}
	}

	if (host->link_handler_policy == KNET_LINK_POLICY_PASSIVE) {
		log_info(knet_h, KNET_SUB_HOST, "host: %u (passive) best link: %u (pri: %u)",
			 host->host_id, host->link[host->active_links[0]].link_id,
			 host->link[host->active_links[0]].priority);
	} else {
		log_info(knet_h, KNET_SUB_HOST, "host: %u has %u active links",
			 host->host_id, host->active_link_entries);
	}

	/* no active links, we can clean the circular buffers and indexes */
	if (!host->active_link_entries) {
		log_warn(knet_h, KNET_SUB_HOST, "host: %u has no active links", host->host_id);
		_clear_cbuffers(host, 0);
	} else {
		reachable = 1;
	}

	/* fire the callback only on an actual reachability transition */
	if (host->status.reachable != reachable) {
		host->status.reachable = reachable;
		if (knet_h->host_status_change_notify_fn) {
			knet_h->host_status_change_notify_fn(
					knet_h->host_status_change_notify_fn_private_data,
					host->host_id,
					host->status.reachable,
					host->status.remote,
					host->status.external);
		}
	}

	return 0;
}
727