1 /*
2 Copyright Red Hat, Inc. 2017
3
4 This program is free software; you can redistribute it and/or modify it
5 under the terms of the GNU General Public License as published by the
6 Free Software Foundation; either version 2, or (at your option) any
7 later version.
8
9 This program is distributed in the hope that it will be useful, but
10 WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 General Public License for more details.
13
14 You should have received a copy of the GNU General Public License
15 along with this program; see the file COPYING. If not, write to the
16 Free Software Foundation, Inc., 675 Mass Ave, Cambridge,
17 MA 02139, USA.
18 */
19 /*
20 * Author: Ryan McCabe <rmccabe@redhat.com>
21 */
#include "config.h"

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <stdint.h>
#include <time.h>
#include <string.h>
#include <syslog.h>
#include <errno.h>
#include <unistd.h>
#include <pthread.h>
#include <corosync/cpg.h>

#include "debug.h"
#include "virt.h"
#include "xvm.h"
#include "cpg.h"
#include "simpleconfig.h"
#include "static_map.h"
#include "server_plugin.h"
42
43 #define NAME "cpg"
44 #define CPG_VERSION "0.1"
45
46 #define MAGIC 0x38e93fc2
47
/*
 * Per-instance backend state, handed to the fence_virtd core as the
 * opaque backend context and also kept in the cpg_virt_handle singleton.
 */
struct cpg_info {
	int magic;			/* MAGIC while valid; zeroed on shutdown */
	config_object_t *config;	/* daemon configuration (not owned here) */
	int vp_count;			/* number of open libvirt connections in vp[] */
	virConnectPtr *vp;		/* heap array of libvirt connections, one per URI */
};
54
/*
 * Sanity-check a backend context: arg must be non-NULL and carry the
 * expected magic value.  On failure this makes the ENCLOSING function
 * return -1 with errno = EINVAL, so it may only be used inside
 * functions that return int.
 */
#define VALIDATE(arg) \
do {\
	if (!arg || ((struct cpg_info *) arg)->magic != MAGIC) { \
		errno = EINVAL;\
		return -1; \
	} \
} while(0)
62
/* Singleton backend handle; set by cpg_virt_init(), read by the CPG callbacks. */
static struct cpg_info *cpg_virt_handle = NULL;
/* Nonzero when domains are matched by UUID instead of by name. */
static int use_uuid = 0;
/* Guards local_vm_list: domains running on this host's hypervisors. */
pthread_mutex_t local_vm_list_lock = PTHREAD_MUTEX_INITIALIZER;
static virt_list_t *local_vm_list = NULL;

/* Guards remote_vm_list: domain states advertised by other CPG members. */
pthread_mutex_t remote_vm_list_lock = PTHREAD_MUTEX_INITIALIZER;
static virt_list_t *remote_vm_list = NULL;

static void cpg_virt_init_libvirt(struct cpg_info *info);
72
/*
 * Replace *vl with a fresh snapshot of the domains visible through the
 * connections in info->vp, tagging each entry with my_id as owner.
 *
 * If vl_get() fails with EPIPE or EINVAL, the libvirtd connections are
 * assumed dead and are re-established before one retry.
 * NOTE(review): the reconnect loop spins until at least one URI
 * reconnects; if libvirtd stays down this busy-loops forever — confirm
 * that is the intended recovery behavior.
 *
 * Returns 0 on success, -1 if no list could be obtained (*vl left NULL).
 */
static int
virt_list_update(struct cpg_info *info, virt_list_t **vl, int my_id)
{
	virt_list_t *list = NULL;

	/* Drop the stale snapshot before fetching a new one. */
	if (*vl)
		vl_free(*vl);

	list = vl_get(info->vp, info->vp_count, my_id);
	if (!list && (errno == EPIPE || errno == EINVAL)) {
		do {
			cpg_virt_init_libvirt(info);
		} while (info->vp_count == 0);
		list = vl_get(info->vp, info->vp_count, my_id);
	}

	*vl = list;
	if (!list)
		return -1;

	return 0;
}
95
96
97 static void
98 store_domains(virt_list_t *vl)
99 {
100 int i;
101
102 if (!vl)
103 return;
104
105 for (i = 0 ; i < vl->vm_count ; i++) {
106 int ret;
107
108 if (!strcmp(DOMAIN0NAME, vl->vm_states[i].v_name))
109 continue;
110
111 ret = cpg_send_vm_state(&vl->vm_states[i]);
112 if (ret < 0) {
113 printf("Error storing VM state for %s|%s\n",
114 vl->vm_states[i].v_name, vl->vm_states[i].v_uuid);
115 }
116 }
117 }
118
119
120 static void
121 update_local_vms(struct cpg_info *info)
122 {
123 uint32_t my_id = 0;
124
125 if (!info)
126 return;
127
128 cpg_get_ids(&my_id, NULL);
129 virt_list_update(info, &local_vm_list, my_id);
130 store_domains(local_vm_list);
131 }
132
133
134 static int
135 do_off(struct cpg_info *info, const char *vm_name)
136 {
137 dbg_printf(5, "%s %s\n", __FUNCTION__, vm_name);
138 return vm_off(info->vp, info->vp_count, vm_name);
139
140 }
141
142 static int
143 do_on(struct cpg_info *info, const char *vm_name)
144 {
145 dbg_printf(5, "%s %s\n", __FUNCTION__, vm_name);
146 return vm_on(info->vp, info->vp_count, vm_name);
147
148 }
149
150 static int
151 do_reboot(struct cpg_info *info, const char *vm_name)
152 {
153 dbg_printf(5, "%s %s\n", __FUNCTION__, vm_name);
154 return vm_reboot(info->vp, info->vp_count, vm_name);
155 }
156
157 static void
158 cpg_join_cb(const struct cpg_address *join, size_t joinlen) {
159 struct cpg_info *info = cpg_virt_handle;
160
161 pthread_mutex_lock(&local_vm_list_lock);
162 update_local_vms(info);
163 pthread_mutex_unlock(&local_vm_list_lock);
164 }
165
166 static void
167 cpg_leave_cb(const struct cpg_address *left, size_t leftlen) {
168 struct cpg_info *info = cpg_virt_handle;
169 int i;
170
171 pthread_mutex_lock(&remote_vm_list_lock);
172 for (i = 0 ; i < leftlen ; i++) {
173 dbg_printf(2, "Removing VMs owned by nodeid %u\n", left[i].nodeid);
174 vl_remove_by_owner(&remote_vm_list, left[i].nodeid);
175 }
176 pthread_mutex_unlock(&remote_vm_list_lock);
177
178 pthread_mutex_lock(&local_vm_list_lock);
179 update_local_vms(info);
180 pthread_mutex_unlock(&local_vm_list_lock);
181 }
182
183 static void
184 store_cb(void *data, size_t len, uint32_t nodeid, uint32_t seqno)
185 {
186 uint32_t my_id;
187 virt_state_t *vs = (virt_state_t *) data;
188 struct cpg_info *info = cpg_virt_handle;
189
190 cpg_get_ids(&my_id, NULL);
191
192 if (nodeid == my_id)
193 return;
194
195 pthread_mutex_lock(&local_vm_list_lock);
196 if (!local_vm_list)
197 update_local_vms(info);
198 pthread_mutex_unlock(&local_vm_list_lock);
199
200 pthread_mutex_lock(&remote_vm_list_lock);
201 vl_update(&remote_vm_list, vs);
202 pthread_mutex_unlock(&remote_vm_list_lock);
203 }
204
/*
 * Process a fence request broadcast over CPG.
 *
 * This function must send a reply from at least one node, otherwise
 * the requesting fence_virtd will block forever in wait_cpt_reply.
 */
static void
do_real_work(void *data, size_t len, uint32_t nodeid, uint32_t seqno)
{
	struct cpg_info *info = cpg_virt_handle;
	struct cpg_fence_req *req = data;
	struct cpg_fence_req reply;
	int reply_code = -1;
	virt_state_t *vs = NULL;
	/* cur_state is only read on paths where vs was found, so it is
	 * never used uninitialized despite having no initializer. */
	int cur_state;
	uint32_t cur_owner = 0;
	int local = 0;	/* nonzero when the domain runs on this host */
	uint32_t my_id, high_id;

	dbg_printf(2, "Request %d for VM %s\n", req->request, req->vm_name);

	if (cpg_get_ids(&my_id, &high_id) == -1) {
		syslog(LOG_WARNING, "Unable to get CPG IDs");
		printf("Should never happen: Can't get CPG node ids - can't proceed\n");
		return;
	}

	/* The reply echoes the request; .response is filled in at "out:". */
	memcpy(&reply, req, sizeof(reply));

	/* First look for the domain among our own hypervisors. */
	pthread_mutex_lock(&local_vm_list_lock);
	update_local_vms(info);
	if (strlen(req->vm_name)) {
		if (use_uuid)
			vs = vl_find_uuid(local_vm_list, req->vm_name);
		else
			vs = vl_find_name(local_vm_list, req->vm_name);

		if (vs) {
			local = 1;
			cur_owner = vs->v_state.s_owner;
			cur_state = vs->v_state.s_state;
			dbg_printf(2, "Found VM %s locally state %d\n",
				req->vm_name, cur_state);
		}
	}
	pthread_mutex_unlock(&local_vm_list_lock);

	/* Not local: check the states advertised by other group members. */
	if (vs == NULL) {
		pthread_mutex_lock(&remote_vm_list_lock);
		if (strlen(req->vm_name)) {
			if (use_uuid)
				vs = vl_find_uuid(remote_vm_list, req->vm_name);
			else
				vs = vl_find_name(remote_vm_list, req->vm_name);

			if (vs) {
				cur_owner = vs->v_state.s_owner;
				cur_state = vs->v_state.s_state;
				dbg_printf(2, "Found VM %s remotely on %u state %d\n",
					req->vm_name, cur_owner, cur_state);
			}
		}
		pthread_mutex_unlock(&remote_vm_list_lock);
	}

	if (!vs) {
		/*
		** We know about all domains on all nodes in the CPG group.
		** If we didn't find it, and we're high ID, act on the request.
		** We can safely assume the VM is OFF because it wasn't found
		** on any current members of the CPG group.
		*/
		if (my_id == high_id) {
			if (req->request == FENCE_STATUS)
				reply_code = RESP_OFF;
			else if (req->request == FENCE_OFF || req->request == FENCE_REBOOT)
				reply_code = RESP_SUCCESS;
			else
				reply_code = 1;

			dbg_printf(2, "Acting on request %d for unknown domain %s -> %d\n",
				req->request, req->vm_name, reply_code);
			goto out;
		}

		/* Not high ID: some other member answers for unknown domains. */
		dbg_printf(2, "Not acting on request %d for unknown domain %s\n",
			req->request, req->vm_name);
		return;
	}

	/* The domain is ours: perform the requested operation locally. */
	if (local) {
		if (req->request == FENCE_STATUS) {
			/* We already have the status */
			if (cur_state == VIR_DOMAIN_SHUTOFF)
				reply_code = RESP_OFF;
			else
				reply_code = RESP_SUCCESS;
		} else if (req->request == FENCE_OFF) {
			reply_code = do_off(info, req->vm_name);
		} else if (req->request == FENCE_ON) {
			reply_code = do_on(info, req->vm_name);
		} else if (req->request == FENCE_REBOOT) {
			reply_code = do_reboot(info, req->vm_name);
		} else {
			dbg_printf(2, "Not explicitly handling request type %d for %s\n",
				req->request, req->vm_name);
			reply_code = 0;
		}
		goto out;
	}

	/*
	** This is a request for a non-local domain that exists on a
	** current CPG group member, so that member will see the request
	** and act on it. We don't need to do anything.
	*/
	dbg_printf(2, "Nothing to do for non-local domain %s seq %d owner %u\n",
		req->vm_name, seqno, cur_owner);
	return;

out:
	dbg_printf(2, "[%s] sending reply code seq %d -> %d\n",
		req->vm_name, seqno, reply_code);

	reply.response = reply_code;
	if (cpg_send_reply(&reply, sizeof(reply), nodeid, seqno) < 0) {
		dbg_printf(2, "cpg_send_reply failed for %s [%d %d]: %s\n",
			req->vm_name, nodeid, seqno, strerror(errno));
	}
}
333
334
335 static int
336 do_request(const char *vm_name, int request, uint32_t seqno)
337 {
338 struct cpg_fence_req freq, *frp;
339 size_t retlen;
340 uint32_t seq;
341 int ret;
342
343 memset(&freq, 0, sizeof(freq));
344 if (!vm_name) {
345 dbg_printf(1, "No VM name\n");
346 return 1;
347 }
348
349 if (strlen(vm_name) >= sizeof(freq.vm_name)) {
350 dbg_printf(1, "VM name %s too long\n", vm_name);
351 return 1;
352 }
353
354 strcpy(freq.vm_name, vm_name);
355
356 freq.request = request;
357 freq.seqno = seqno;
358
359 if (cpg_send_req(&freq, sizeof(freq), &seq) != 0) {
360 dbg_printf(1, "Failed to send request %d for VM %s\n",
361 freq.request, vm_name);
362 return 1;
363 }
364
365 dbg_printf(2, "Sent request %d for VM %s got seqno %d\n",
366 request, vm_name, seq);
367
368 if (cpg_wait_reply((void *) &frp, &retlen, seq) != 0) {
369 dbg_printf(1, "Failed to receive reply seq %d for %s\n", seq, vm_name);
370 return 1;
371 }
372
373 dbg_printf(2, "Received reply [%d] seq %d for %s\n",
374 frp->response, seq, vm_name);
375
376 ret = frp->response;
377 free(frp);
378
379 return ret;
380 }
381
382
/*
 * Fence API: no-op operation used for connectivity testing.
 * Always returns 1 after validating the handle.
 */
static int
cpg_virt_null(const char *vm_name, void *priv)
{
	VALIDATE(priv);

	printf("[cpg-virt] Null operation on %s\n", vm_name);
	return 1;
}
391
392
393 static int
394 cpg_virt_off(const char *vm_name, const char *src, uint32_t seqno, void *priv)
395 {
396 VALIDATE(priv);
397 printf("[cpg-virt] OFF operation on %s seq %d\n", vm_name, seqno);
398
399 return do_request(vm_name, FENCE_OFF, seqno);
400 }
401
402
403 static int
404 cpg_virt_on(const char *vm_name, const char *src, uint32_t seqno, void *priv)
405 {
406 VALIDATE(priv);
407 printf("[cpg-virt] ON operation on %s seq %d\n", vm_name, seqno);
408
409 return do_request(vm_name, FENCE_ON, seqno);
410 }
411
412
/*
 * Fence API: report overall device status.  Returns 0 (healthy) after
 * validating the backend handle.
 */
static int
cpg_virt_devstatus(void *priv)
{
	/* Validate before logging — every other callback in this file
	 * checks the handle first; previously this printed even for an
	 * invalid handle. */
	VALIDATE(priv);
	printf("[cpg-virt] Device status\n");

	return 0;
}
421
422
423 static int
424 cpg_virt_status(const char *vm_name, void *priv)
425 {
426 VALIDATE(priv);
427 printf("[cpg-virt] STATUS operation on %s\n", vm_name);
428
429 return do_request(vm_name, FENCE_STATUS, 0);
430 }
431
432
433 static int
434 cpg_virt_reboot(const char *vm_name, const char *src,
435 uint32_t seqno, void *priv)
436 {
437 VALIDATE(priv);
438 printf("[cpg-virt] REBOOT operation on %s seq %d\n", vm_name, seqno);
439
440 return do_request(vm_name, FENCE_REBOOT, 0);
441 }
442
443
444 static int
445 cpg_virt_hostlist(hostlist_callback callback, void *arg, void *priv)
446 {
447 struct cpg_info *info = (struct cpg_info *) priv;
448 int i;
449
450 VALIDATE(priv);
451 printf("[cpg-virt] HOSTLIST operation\n");
452
453 pthread_mutex_lock(&local_vm_list_lock);
454 update_local_vms(info);
455 for (i = 0 ; i < local_vm_list->vm_count ; i++) {
456 callback(local_vm_list->vm_states[i].v_name,
457 local_vm_list->vm_states[i].v_uuid,
458 local_vm_list->vm_states[i].v_state.s_state, arg);
459 }
460 pthread_mutex_unlock(&local_vm_list_lock);
461
462 return 1;
463 }
464
465 static void
466 cpg_virt_init_libvirt(struct cpg_info *info) {
467 config_object_t *config = info->config;
468 int i = 0;
469
|
(1) Event cond_true: |
Condition "info->vp", taking true branch. |
470 if (info->vp) {
|
(2) Event cond_true: |
Condition "dget() >= 2", taking true branch. |
471 dbg_printf(2, "Lost libvirtd connection. Reinitializing.\n");
|
(3) Event cond_true: |
Condition "i < info->vp_count", taking true branch. |
|
(5) Event loop_begin: |
Jumped back to beginning of loop. |
|
(6) Event cond_true: |
Condition "i < info->vp_count", taking true branch. |
|
(8) Event loop_begin: |
Jumped back to beginning of loop. |
|
(9) Event cond_false: |
Condition "i < info->vp_count", taking false branch. |
472 for (i = 0 ; i < info->vp_count ; i++)
|
(4) Event loop: |
Jumping back to the beginning of the loop. |
|
(7) Event loop: |
Jumping back to the beginning of the loop. |
|
(10) Event loop_end: |
Reached end of loop. |
473 virConnectClose(info->vp[i]);
474 free(info->vp);
475 info->vp = NULL;
476 }
477 info->vp_count = 0;
478
479 do {
480 virConnectPtr vp;
481 virConnectPtr *vpl = NULL;
482 char conf_attr[256];
483 char value[1024];
484 char *uri;
485
|
(11) Event cond_true: |
Condition "i != 0", taking true branch. |
|
(20) Event cond_true: |
Condition "i != 0", taking true branch. |
|
(36) Event cond_true: |
Condition "i != 0", taking true branch. |
486 if (i != 0) {
487 snprintf(conf_attr, sizeof(conf_attr),
488 "backends/cpg/@uri%d", i);
|
(12) Event if_fallthrough: |
Falling through to end of if statement. |
|
(21) Event if_fallthrough: |
Falling through to end of if statement. |
|
(37) Event if_fallthrough: |
Falling through to end of if statement. |
489 } else
|
(13) Event if_end: |
End of if statement. |
|
(22) Event if_end: |
End of if statement. |
|
(38) Event if_end: |
End of if statement. |
490 snprintf(conf_attr, sizeof(conf_attr), "backends/cpg/@uri");
491 ++i;
492
|
(14) Event cond_false: |
Condition "config->get(config->info, conf_attr, value, 1024UL /* sizeof (value) */) != 0", taking false branch. |
|
(23) Event cond_false: |
Condition "config->get(config->info, conf_attr, value, 1024UL /* sizeof (value) */) != 0", taking false branch. |
|
(39) Event cond_true: |
Condition "config->get(config->info, conf_attr, value, 1024UL /* sizeof (value) */) != 0", taking true branch. |
493 if (sc_get(config, conf_attr, value, sizeof(value)) != 0)
|
(15) Event if_end: |
End of if statement. |
|
(24) Event if_end: |
End of if statement. |
|
(40) Event break: |
Breaking from loop. |
494 break;
495
496 uri = value;
497 vp = virConnectOpen(uri);
|
(16) Event cond_true: |
Condition "!vp", taking true branch. |
|
(25) Event cond_false: |
Condition "!vp", taking false branch. |
498 if (!vp) {
|
(17) Event cond_true: |
Condition "dget() >= 1", taking true branch. |
499 dbg_printf(1, "[cpg-virt:INIT] Failed to connect to URI: %s\n", uri);
|
(18) Event continue: |
Continuing loop. |
500 continue;
|
(26) Event if_end: |
End of if statement. |
501 }
502
|
(27) Event alloc_fn: |
Storage is returned from allocation function "realloc". |
|
(28) Event assign: |
Assigning: "vpl" = "realloc(info->vp, 8UL * (info->vp_count + 1))". |
| Also see events: |
[assign] |
503 vpl = realloc(info->vp, sizeof(*info->vp) * (info->vp_count + 1));
|
(29) Event cond_false: |
Condition "!vpl", taking false branch. |
504 if (!vpl) {
505 dbg_printf(1, "[cpg-virt:INIT] Out of memory allocating URI: %s\n",
506 uri);
507 virConnectClose(vp);
508 continue;
|
(30) Event if_end: |
End of if statement. |
509 }
510
|
(31) Event assign: |
Assigning: "info->vp" = "vpl". |
| Also see events: |
[alloc_fn][assign] |
511 info->vp = vpl;
512 info->vp[info->vp_count++] = vp;
513
|
(32) Event cond_true: |
Condition "i > 1", taking true branch. |
514 if (i > 1)
|
(33) Event cond_true: |
Condition "dget() >= 1", taking true branch. |
|
(34) Event if_fallthrough: |
Falling through to end of if statement. |
515 dbg_printf(1, "[cpg-virt:INIT] Added URI%d %s\n", i - 1, uri);
516 else
|
(35) Event if_end: |
End of if statement. |
517 dbg_printf(1, "[cpg_virt:INIT] Added URI %s\n", uri);
|
(19) Event loop: |
Looping back. |
|
(41) Event loop_end: |
Reached end of loop. |
518 } while (1);
519 }
520
521 static int
522 cpg_virt_init(backend_context_t *c, config_object_t *config)
523 {
524 char value[1024];
525 struct cpg_info *info = NULL;
526 int ret;
527
528 ret = cpg_start(PACKAGE_NAME,
529 do_real_work, store_cb, cpg_join_cb, cpg_leave_cb);
|
(1) Event cond_false: |
Condition "ret < 0", taking false branch. |
530 if (ret < 0)
|
(2) Event if_end: |
End of if statement. |
531 return -1;
532
533 info = calloc(1, sizeof(*info));
|
(3) Event cond_false: |
Condition "!info", taking false branch. |
534 if (!info)
|
(4) Event if_end: |
End of if statement. |
535 return -1;
536 info->magic = MAGIC;
537 info->config = config;
538
|
(5) Event cond_true: |
Condition "config->get(config->info, "fence_virtd/@debug", value, 1024UL /* sizeof (value) */) == 0", taking true branch. |
539 if (sc_get(config, "fence_virtd/@debug", value, sizeof(value)) == 0)
540 dset(atoi(value));
541
|
(6) Event alloc_arg: |
"cpg_virt_init_libvirt" allocates memory that is stored into "info->vp". [details] |
| Also see events: |
[leaked_storage] |
542 cpg_virt_init_libvirt(info);
543
544 /* Naming scheme is no longer a top-level config option.
545 * However, we retain it here for configuration compatibility with
546 * versions 0.1.3 and previous.
547 */
|
(7) Event cond_true: |
Condition "config->get(config->info, "fence_virtd/@name_mode", value, 1023UL /* sizeof (value) - 1 */) == 0", taking true branch. |
548 if (sc_get(config, "fence_virtd/@name_mode",
549 value, sizeof(value)-1) == 0) {
550
|
(8) Event cond_true: |
Condition "dget() >= 1", taking true branch. |
551 dbg_printf(1, "Got %s for name_mode\n", value);
|
(9) Event cond_true: |
Condition "!strcasecmp(value, "uuid")", taking true branch. |
552 if (!strcasecmp(value, "uuid")) {
553 use_uuid = 1;
|
(10) Event if_fallthrough: |
Falling through to end of if statement. |
554 } else if (!strcasecmp(value, "name")) {
555 use_uuid = 0;
556 } else {
557 dbg_printf(1, "Unsupported name_mode: %s\n", value);
|
(11) Event if_end: |
End of if statement. |
558 }
559 }
560
|
(12) Event cond_true: |
Condition "config->get(config->info, "backends/cpg/@name_mode", value, 1023UL /* sizeof (value) - 1 */) == 0", taking true branch. |
561 if (sc_get(config, "backends/cpg/@name_mode",
562 value, sizeof(value)-1) == 0)
563 {
|
(13) Event cond_true: |
Condition "dget() >= 1", taking true branch. |
564 dbg_printf(1, "Got %s for name_mode\n", value);
|
(14) Event cond_true: |
Condition "!strcasecmp(value, "uuid")", taking true branch. |
565 if (!strcasecmp(value, "uuid")) {
566 use_uuid = 1;
|
(15) Event if_fallthrough: |
Falling through to end of if statement. |
567 } else if (!strcasecmp(value, "name")) {
568 use_uuid = 0;
569 } else {
570 dbg_printf(1, "Unsupported name_mode: %s\n", value);
|
(16) Event if_end: |
End of if statement. |
571 }
572 }
573
|
(17) Event cond_true: |
Condition "info->vp_count < 1", taking true branch. |
574 if (info->vp_count < 1) {
|
(18) Event cond_true: |
Condition "dget() >= 1", taking true branch. |
575 dbg_printf(1, "[cpg_virt:INIT] Could not connect to any hypervisors\n");
576 cpg_stop();
|
(19) Event leaked_storage: |
Freeing "info" without freeing its pointer field "vp" leaks the storage that "vp" points to. |
| Also see events: |
[alloc_arg] |
577 free(info);
578 return -1;
579 }
580
581 pthread_mutex_lock(&local_vm_list_lock);
582 update_local_vms(info);
583 pthread_mutex_unlock(&local_vm_list_lock);
584
585 *c = (void *) info;
586 cpg_virt_handle = info;
587 return 0;
588 }
589
590
591 static int
592 cpg_virt_shutdown(backend_context_t c)
593 {
594 struct cpg_info *info = (struct cpg_info *)c;
595 int i = 0;
596 int ret = 0;
597
598 VALIDATE(info);
599 info->magic = 0;
600
601 cpg_stop();
602
603 for (i = 0 ; i < info->vp_count ; i++) {
604 if (virConnectClose(info->vp[i]) < 0)
605 ret = -errno;
606 }
607
608 free(info->vp);
609 free(info);
610
611 return ret;
612 }
613
614
/* Fence operation table exported to the fence_virtd core via the plugin. */
static fence_callbacks_t cpg_callbacks = {
	.null = cpg_virt_null,
	.off = cpg_virt_off,
	.on = cpg_virt_on,
	.reboot = cpg_virt_reboot,
	.status = cpg_virt_status,
	.devstatus = cpg_virt_devstatus,
	.hostlist = cpg_virt_hostlist
};
624
/* Plugin descriptor returned by BACKEND_INFO_SYM() to the daemon. */
static backend_plugin_t cpg_virt_plugin = {
	.name = NAME,
	.version = CPG_VERSION,
	.callbacks = &cpg_callbacks,
	.init = cpg_virt_init,
	.cleanup = cpg_virt_shutdown,
};
632
/* Exported symbol: plugin ABI version, checked by the daemon at load time. */
double
BACKEND_VER_SYM(void)
{
	return PLUGIN_VERSION_BACKEND;
}
638
/* Exported symbol: hands the daemon this backend's plugin descriptor. */
const backend_plugin_t *
BACKEND_INFO_SYM(void)
{
	return &cpg_virt_plugin;
}
644