* [pve-devel] [PATCH qemu-server 0/3] qmeventd: improve shutdown behaviour @ 2022-09-21 12:49 Dominik Csapak 2022-09-21 12:49 ` [pve-devel] [PATCH qemu-server 1/3] qmeventd: rework 'forced_cleanup' handling and set timeout to 60s Dominik Csapak ` (2 more replies) 0 siblings, 3 replies; 13+ messages in thread From: Dominik Csapak @ 2022-09-21 12:49 UTC (permalink / raw) To: pve-devel includes the following improvements: * increases 'force cleanup' timeout to 60s (from 5) * saves individual timeout for each vm * don't force cleanup for vms where normal cleanup worked * sending QMP quit instead of SIGTERM (less log noise) Dominik Csapak (3): qmeventd: rework 'forced_cleanup' handling and set timeout to 60s qmeventd: cancel 'forced cleanup' when normal cleanup succeeds qmeventd: send QMP 'quit' command instead of SIGTERM qmeventd/qmeventd.c | 109 +++++++++++++++++++++++++++++--------------- qmeventd/qmeventd.h | 2 + 2 files changed, 75 insertions(+), 36 deletions(-) -- 2.30.2 ^ permalink raw reply [flat|nested] 13+ messages in thread
* [pve-devel] [PATCH qemu-server 1/3] qmeventd: rework 'forced_cleanup' handling and set timeout to 60s 2022-09-21 12:49 [pve-devel] [PATCH qemu-server 0/3] qmeventd: improve shutdown behaviour Dominik Csapak @ 2022-09-21 12:49 ` Dominik Csapak [not found] ` <<20220921124911.3224970-2-d.csapak@proxmox.com> 2022-09-22 11:51 ` Thomas Lamprecht 2022-09-21 12:49 ` [pve-devel] [PATCH qemu-server 2/3] qmeventd: cancel 'forced cleanup' when normal cleanup succeeds Dominik Csapak 2022-09-21 12:49 ` [pve-devel] [PATCH qemu-server 3/3] qmeventd: send QMP 'quit' command instead of SIGTERM Dominik Csapak 2 siblings, 2 replies; 13+ messages in thread From: Dominik Csapak @ 2022-09-21 12:49 UTC (permalink / raw) To: pve-devel currently, the 'forced_cleanup' (sending SIGKILL to the qemu process), is intended to be triggered 5 seconds after sending the initial shutdown signal (SIGTERM) which is sadly not enough for some setups. Accidentally, it could be triggered earlier than 5 seconds, if a SIGALRM triggers in the timespan directly before setting it again. Also, this approach means that depending on when machines are shutdown their forced cleanup may happen after 5 seconds, or any time after, if new vms are shut off in the meantime. To improve this situation, rework the way we deal with this cleanup, by saving the time incl. timeout in the CleanupData, and trigger a forced_cleanup every 10 seconds. If that happends, remove it from the forced_cleanup list. To improve the shutdown behaviour, increase the default timeout to 60 seconds, which should be enough, but add a commandline toggle where users can set it to a different value. 
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com> --- qmeventd/qmeventd.c | 75 +++++++++++++++++++++++++-------------------- qmeventd/qmeventd.h | 2 ++ 2 files changed, 44 insertions(+), 33 deletions(-) diff --git a/qmeventd/qmeventd.c b/qmeventd/qmeventd.c index 8d32827..e9ff5b3 100644 --- a/qmeventd/qmeventd.c +++ b/qmeventd/qmeventd.c @@ -29,6 +29,7 @@ #include <signal.h> #include <stdbool.h> #include <stdio.h> +#include <stdlib.h> #include <string.h> #include <sys/epoll.h> #include <sys/socket.h> @@ -36,15 +37,18 @@ #include <sys/un.h> #include <sys/wait.h> #include <unistd.h> +#include <time.h> #include "qmeventd.h" +#define DEFAULT_KILL_TIMEOUT 60 + static int verbose = 0; +static int kill_timeout = DEFAULT_KILL_TIMEOUT; static int epoll_fd = 0; static const char *progname; GHashTable *vm_clients; // key=vmid (freed on remove), value=*Client (free manually) GSList *forced_cleanups; -volatile sig_atomic_t alarm_triggered = 0; /* * Helper functions @@ -54,9 +58,10 @@ static void usage() { fprintf(stderr, "Usage: %s [-f] [-v] PATH\n", progname); - fprintf(stderr, " -f run in foreground (default: false)\n"); - fprintf(stderr, " -v verbose (default: false)\n"); - fprintf(stderr, " PATH use PATH for socket\n"); + fprintf(stderr, " -f run in foreground (default: false)\n"); + fprintf(stderr, " -v verbose (default: false)\n"); + fprintf(stderr, " -t timeout kill timeout (default: %ds)\n", DEFAULT_KILL_TIMEOUT); + fprintf(stderr, " PATH use PATH for socket\n"); } static pid_t @@ -469,16 +474,16 @@ terminate_client(struct Client *client) int err = kill(client->pid, SIGTERM); log_neg(err, "kill"); + time_t timeout = time(NULL) + kill_timeout; + struct CleanupData *data_ptr = malloc(sizeof(struct CleanupData)); struct CleanupData data = { .pid = client->pid, - .pidfd = pidfd + .pidfd = pidfd, + .timeout = timeout, }; *data_ptr = data; forced_cleanups = g_slist_prepend(forced_cleanups, (void *)data_ptr); - - // resets any other alarms, but will fire eventually and 
cleanup all - alarm(5); } void @@ -551,27 +556,16 @@ handle_client(struct Client *client) json_tokener_free(tok); } - -/* - * SIGALRM and cleanup handling - * - * terminate_client will set an alarm for 5 seconds and add its client's PID to - * the forced_cleanups list - when the timer expires, we iterate the list and - * attempt to issue SIGKILL to all processes which haven't yet stopped. - */ - -static void -alarm_handler(__attribute__((unused)) int signum) -{ - alarm_triggered = 1; -} - static void sigkill(void *ptr, __attribute__((unused)) void *unused) { struct CleanupData data = *((struct CleanupData *)ptr); int err; + if (data.timeout > time(NULL)) { + return; + } + if (data.pidfd > 0) { err = pidfd_send_signal(data.pidfd, SIGKILL, NULL, 0); (void)close(data.pidfd); @@ -588,21 +582,29 @@ sigkill(void *ptr, __attribute__((unused)) void *unused) fprintf(stderr, "cleanup failed, terminating pid '%d' with SIGKILL\n", data.pid); } + + // remove ourselves from the list + forced_cleanups = g_slist_remove(forced_cleanups, ptr); + free(ptr); } +/* + * SIGALRM and cleanup handling + * + * handles the cleanup on non terminated qemu processes, will be called every + * 10 seconds by setting 'alarm(10)' at the end again + */ + static void -handle_forced_cleanup() +alarm_handler(__attribute__((unused)) int signum) { - if (alarm_triggered) { + if (g_slist_length(forced_cleanups) > 0) { VERBOSE_PRINT("clearing forced cleanup backlog\n"); - alarm_triggered = 0; g_slist_foreach(forced_cleanups, sigkill, NULL); - g_slist_free_full(forced_cleanups, free); - forced_cleanups = NULL; } + alarm(10); } - int main(int argc, char *argv[]) { @@ -611,7 +613,7 @@ main(int argc, char *argv[]) char *socket_path = NULL; progname = argv[0]; - while ((opt = getopt(argc, argv, "hfv")) != -1) { + while ((opt = getopt(argc, argv, "hfvt:")) != -1) { switch (opt) { case 'f': daemonize = 0; @@ -619,6 +621,15 @@ main(int argc, char *argv[]) case 'v': verbose = 1; break; + case 't': + errno = 0; + char 
*endptr = NULL; + kill_timeout = strtoul(optarg, &endptr, 10); + if (errno != 0 || *endptr != '\0' || kill_timeout == 0) { + usage(); + exit(EXIT_FAILURE); + } + break; case 'h': usage(); exit(EXIT_SUCCESS); @@ -668,10 +679,10 @@ main(int argc, char *argv[]) int nevents; + alarm(10); for(;;) { nevents = epoll_wait(epoll_fd, events, 1, -1); if (nevents < 0 && errno == EINTR) { - handle_forced_cleanup(); continue; } bail_neg(nevents, "epoll_wait"); @@ -688,7 +699,5 @@ main(int argc, char *argv[]) handle_client((struct Client *)events[n].data.ptr); } } - - handle_forced_cleanup(); } } diff --git a/qmeventd/qmeventd.h b/qmeventd/qmeventd.h index 2cf1947..1b9cea1 100644 --- a/qmeventd/qmeventd.h +++ b/qmeventd/qmeventd.h @@ -7,6 +7,7 @@ */ #include <sys/syscall.h> +#include <time.h> #ifndef __NR_pidfd_open #define __NR_pidfd_open 434 @@ -86,6 +87,7 @@ struct Client { struct CleanupData { pid_t pid; int pidfd; + time_t timeout; }; void handle_qmp_handshake(struct Client *client); -- 2.30.2 ^ permalink raw reply [flat|nested] 13+ messages in thread
[parent not found: <<20220921124911.3224970-2-d.csapak@proxmox.com>]
* Re: [pve-devel] [PATCH qemu-server 1/3] qmeventd: rework 'forced_cleanup' handling and set timeout to 60s [not found] ` <<20220921124911.3224970-2-d.csapak@proxmox.com> @ 2022-09-22 8:24 ` Fabian Grünbichler 2022-09-22 11:31 ` Dominik Csapak 0 siblings, 1 reply; 13+ messages in thread From: Fabian Grünbichler @ 2022-09-22 8:24 UTC (permalink / raw) To: Proxmox VE development discussion On September 21, 2022 2:49 pm, Dominik Csapak wrote: > currently, the 'forced_cleanup' (sending SIGKILL to the qemu process), > is intended to be triggered 5 seconds after sending the initial shutdown > signal (SIGTERM) which is sadly not enough for some setups. > > Accidentally, it could be triggered earlier than 5 seconds, if a > SIGALRM triggers in the timespan directly before setting it again. > > Also, this approach means that depending on when machines are shutdown > their forced cleanup may happen after 5 seconds, or any time after, if > new vms are shut off in the meantime. > > To improve this situation, rework the way we deal with this cleanup, by > saving the time incl. timeout in the CleanupData, and trigger a > forced_cleanup every 10 seconds. If that happends, remove it from > the forced_cleanup list. > > To improve the shutdown behaviour, increase the default timeout to 60 > seconds, which should be enough, but add a commandline toggle where > users can set it to a different value. 
> > Signed-off-by: Dominik Csapak <d.csapak@proxmox.com> > --- > qmeventd/qmeventd.c | 75 +++++++++++++++++++++++++-------------------- > qmeventd/qmeventd.h | 2 ++ > 2 files changed, 44 insertions(+), 33 deletions(-) > > diff --git a/qmeventd/qmeventd.c b/qmeventd/qmeventd.c > index 8d32827..e9ff5b3 100644 > --- a/qmeventd/qmeventd.c > +++ b/qmeventd/qmeventd.c > @@ -29,6 +29,7 @@ > #include <signal.h> > #include <stdbool.h> > #include <stdio.h> > +#include <stdlib.h> > #include <string.h> > #include <sys/epoll.h> > #include <sys/socket.h> > @@ -36,15 +37,18 @@ > #include <sys/un.h> > #include <sys/wait.h> > #include <unistd.h> > +#include <time.h> > > #include "qmeventd.h" > > +#define DEFAULT_KILL_TIMEOUT 60 > + > static int verbose = 0; > +static int kill_timeout = DEFAULT_KILL_TIMEOUT; > static int epoll_fd = 0; > static const char *progname; > GHashTable *vm_clients; // key=vmid (freed on remove), value=*Client (free manually) > GSList *forced_cleanups; > -volatile sig_atomic_t alarm_triggered = 0; > > /* > * Helper functions > @@ -54,9 +58,10 @@ static void > usage() > { > fprintf(stderr, "Usage: %s [-f] [-v] PATH\n", progname); > - fprintf(stderr, " -f run in foreground (default: false)\n"); > - fprintf(stderr, " -v verbose (default: false)\n"); > - fprintf(stderr, " PATH use PATH for socket\n"); > + fprintf(stderr, " -f run in foreground (default: false)\n"); > + fprintf(stderr, " -v verbose (default: false)\n"); > + fprintf(stderr, " -t timeout kill timeout (default: %ds)\n", DEFAULT_KILL_TIMEOUT); > + fprintf(stderr, " PATH use PATH for socket\n"); > } > > static pid_t > @@ -469,16 +474,16 @@ terminate_client(struct Client *client) > int err = kill(client->pid, SIGTERM); > log_neg(err, "kill"); > > + time_t timeout = time(NULL) + kill_timeout; > + > struct CleanupData *data_ptr = malloc(sizeof(struct CleanupData)); > struct CleanupData data = { > .pid = client->pid, > - .pidfd = pidfd > + .pidfd = pidfd, > + .timeout = timeout, > }; > *data_ptr = data; > 
forced_cleanups = g_slist_prepend(forced_cleanups, (void *)data_ptr); > - > - // resets any other alarms, but will fire eventually and cleanup all > - alarm(5); > } > > void > @@ -551,27 +556,16 @@ handle_client(struct Client *client) > json_tokener_free(tok); > } > > - > -/* > - * SIGALRM and cleanup handling > - * > - * terminate_client will set an alarm for 5 seconds and add its client's PID to > - * the forced_cleanups list - when the timer expires, we iterate the list and > - * attempt to issue SIGKILL to all processes which haven't yet stopped. > - */ > - > -static void > -alarm_handler(__attribute__((unused)) int signum) > -{ > - alarm_triggered = 1; > -} > - wasn't this intentionally decoupled like this? alarm_handler just sets the flag actual force cleanup is conditionalized on the alarm having triggered, but the cleanup happens outside of the signal handler.. is there a reason from switching away from these scheme? we don't need to do the cleanup in the signal handler (timing is already plenty fuzzy anyway ;)) > static void > sigkill(void *ptr, __attribute__((unused)) void *unused) > { > struct CleanupData data = *((struct CleanupData *)ptr); > int err; > > + if (data.timeout > time(NULL)) { nit: current time / cutoff could be passed in via the currently unused user_data parameter.. 
> + return; > + } > + > if (data.pidfd > 0) { > err = pidfd_send_signal(data.pidfd, SIGKILL, NULL, 0); > (void)close(data.pidfd); > @@ -588,21 +582,29 @@ sigkill(void *ptr, __attribute__((unused)) void *unused) > fprintf(stderr, "cleanup failed, terminating pid '%d' with SIGKILL\n", > data.pid); > } > + > + // remove ourselves from the list > + forced_cleanups = g_slist_remove(forced_cleanups, ptr); > + free(ptr); > } > > +/* > + * SIGALRM and cleanup handling > + * > + * handles the cleanup on non terminated qemu processes, will be called every > + * 10 seconds by setting 'alarm(10)' at the end again > + */ > + > static void > -handle_forced_cleanup() > +alarm_handler(__attribute__((unused)) int signum) > { > - if (alarm_triggered) { > + if (g_slist_length(forced_cleanups) > 0) { > VERBOSE_PRINT("clearing forced cleanup backlog\n"); > - alarm_triggered = 0; > g_slist_foreach(forced_cleanups, sigkill, NULL); > - g_slist_free_full(forced_cleanups, free); > - forced_cleanups = NULL; > } > + alarm(10); > } > > - > int > main(int argc, char *argv[]) > { > @@ -611,7 +613,7 @@ main(int argc, char *argv[]) > char *socket_path = NULL; > progname = argv[0]; > > - while ((opt = getopt(argc, argv, "hfv")) != -1) { > + while ((opt = getopt(argc, argv, "hfvt:")) != -1) { > switch (opt) { > case 'f': > daemonize = 0; > @@ -619,6 +621,15 @@ main(int argc, char *argv[]) > case 'v': > verbose = 1; > break; > + case 't': > + errno = 0; > + char *endptr = NULL; > + kill_timeout = strtoul(optarg, &endptr, 10); > + if (errno != 0 || *endptr != '\0' || kill_timeout == 0) { > + usage(); > + exit(EXIT_FAILURE); > + } > + break; > case 'h': > usage(); > exit(EXIT_SUCCESS); > @@ -668,10 +679,10 @@ main(int argc, char *argv[]) > > int nevents; > > + alarm(10); > for(;;) { > nevents = epoll_wait(epoll_fd, events, 1, -1); > if (nevents < 0 && errno == EINTR) { > - handle_forced_cleanup(); > continue; > } > bail_neg(nevents, "epoll_wait"); > @@ -688,7 +699,5 @@ main(int argc, char *argv[]) > 
handle_client((struct Client *)events[n].data.ptr); > } > } > - > - handle_forced_cleanup(); > } > } > diff --git a/qmeventd/qmeventd.h b/qmeventd/qmeventd.h > index 2cf1947..1b9cea1 100644 > --- a/qmeventd/qmeventd.h > +++ b/qmeventd/qmeventd.h > @@ -7,6 +7,7 @@ > */ > > #include <sys/syscall.h> > +#include <time.h> > > #ifndef __NR_pidfd_open > #define __NR_pidfd_open 434 > @@ -86,6 +87,7 @@ struct Client { > struct CleanupData { > pid_t pid; > int pidfd; > + time_t timeout; > }; > > void handle_qmp_handshake(struct Client *client); > -- > 2.30.2 > > > > _______________________________________________ > pve-devel mailing list > pve-devel@lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel > > > ^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [pve-devel] [PATCH qemu-server 1/3] qmeventd: rework 'forced_cleanup' handling and set timeout to 60s 2022-09-22 8:24 ` Fabian Grünbichler @ 2022-09-22 11:31 ` Dominik Csapak 2022-09-22 12:01 ` Wolfgang Bumiller 0 siblings, 1 reply; 13+ messages in thread From: Dominik Csapak @ 2022-09-22 11:31 UTC (permalink / raw) To: Proxmox VE development discussion, Fabian Grünbichler [snip] >> -/* >> - * SIGALRM and cleanup handling >> - * >> - * terminate_client will set an alarm for 5 seconds and add its client's PID to >> - * the forced_cleanups list - when the timer expires, we iterate the list and >> - * attempt to issue SIGKILL to all processes which haven't yet stopped. >> - */ >> - >> -static void >> -alarm_handler(__attribute__((unused)) int signum) >> -{ >> - alarm_triggered = 1; >> -} >> - > > wasn't this intentionally decoupled like this? > > alarm_handler just sets the flag > actual force cleanup is conditionalized on the alarm having triggered, > but the cleanup happens outside of the signal handler.. > > is there a reason from switching away from these scheme? we don't need > to do the cleanup in the signal handler (timing is already plenty fuzzy > anyway ;)) no real reason, i found the code somewhat cleaner, but you're right, we probably want to keep that, and just trigger it regularly > >> static void >> sigkill(void *ptr, __attribute__((unused)) void *unused) >> { >> struct CleanupData data = *((struct CleanupData *)ptr); >> int err; >> >> + if (data.timeout > time(NULL)) { > > nit: current time / cutoff could be passed in via the currently unused > user_data parameter.. > make sense ^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [pve-devel] [PATCH qemu-server 1/3] qmeventd: rework 'forced_cleanup' handling and set timeout to 60s 2022-09-22 11:31 ` Dominik Csapak @ 2022-09-22 12:01 ` Wolfgang Bumiller 2022-09-22 12:22 ` Dominik Csapak 0 siblings, 1 reply; 13+ messages in thread From: Wolfgang Bumiller @ 2022-09-22 12:01 UTC (permalink / raw) To: Dominik Csapak; +Cc: Proxmox VE development discussion, Fabian Grünbichler On Thu, Sep 22, 2022 at 01:31:49PM +0200, Dominik Csapak wrote: > [snip] > > > -/* > > > - * SIGALRM and cleanup handling > > > - * > > > - * terminate_client will set an alarm for 5 seconds and add its client's PID to > > > - * the forced_cleanups list - when the timer expires, we iterate the list and > > > - * attempt to issue SIGKILL to all processes which haven't yet stopped. > > > - */ > > > - > > > -static void > > > -alarm_handler(__attribute__((unused)) int signum) > > > -{ > > > - alarm_triggered = 1; > > > -} > > > - > > > > wasn't this intentionally decoupled like this? > > > > alarm_handler just sets the flag > > actual force cleanup is conditionalized on the alarm having triggered, > > but the cleanup happens outside of the signal handler.. > > > > is there a reason from switching away from these scheme? we don't need > > to do the cleanup in the signal handler (timing is already plenty fuzzy > > anyway ;)) > > no real reason, i found the code somewhat cleaner, but you're right, > we probably want to keep that, and just trigger it regularly From what I can tell the only point of this signal is to interrupt `epoll()` after a while to call the cleanup/kill handler since we only have a single worker here that needs to do some work after a timeout. 
Why not either: - set a bool instead of calling `alarm()` which causes the next `epoll()` call to use a timeout and call the cleanups if epoll turns up empty - or create a timerfd (timerfd_create(2)) in the beginning which we add to the epoll context and use `timerfd_settime(2)` in place of `alarm()`, which will also wake up the epoll call without having to add timeouts to it `alarm()` is just such a gross interface... In theory we'd also be able to ditch all of those `EINTR` loops as we wouldn't be expecting any interrupts anymore... (and if we did expect them, we could add a `signalfd(2)` to `epoll()` as well ;-) ^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [pve-devel] [PATCH qemu-server 1/3] qmeventd: rework 'forced_cleanup' handling and set timeout to 60s 2022-09-22 12:01 ` Wolfgang Bumiller @ 2022-09-22 12:22 ` Dominik Csapak 2022-09-22 12:46 ` Wolfgang Bumiller 0 siblings, 1 reply; 13+ messages in thread From: Dominik Csapak @ 2022-09-22 12:22 UTC (permalink / raw) To: Wolfgang Bumiller Cc: Proxmox VE development discussion, Fabian Grünbichler On 9/22/22 14:01, Wolfgang Bumiller wrote: > On Thu, Sep 22, 2022 at 01:31:49PM +0200, Dominik Csapak wrote: >> [snip] >>>> -/* >>>> - * SIGALRM and cleanup handling >>>> - * >>>> - * terminate_client will set an alarm for 5 seconds and add its client's PID to >>>> - * the forced_cleanups list - when the timer expires, we iterate the list and >>>> - * attempt to issue SIGKILL to all processes which haven't yet stopped. >>>> - */ >>>> - >>>> -static void >>>> -alarm_handler(__attribute__((unused)) int signum) >>>> -{ >>>> - alarm_triggered = 1; >>>> -} >>>> - >>> >>> wasn't this intentionally decoupled like this? >>> >>> alarm_handler just sets the flag >>> actual force cleanup is conditionalized on the alarm having triggered, >>> but the cleanup happens outside of the signal handler.. >>> >>> is there a reason from switching away from these scheme? we don't need >>> to do the cleanup in the signal handler (timing is already plenty fuzzy >>> anyway ;)) >> >> no real reason, i found the code somewhat cleaner, but you're right, >> we probably want to keep that, and just trigger it regularly > > From what I can tell the only point of this signal is to interrupt > `epoll()` after a while to call the cleanup/kill handler since we only > have a single worker here that needs to do some work after a timeout. 
> > Why not either: > - set a bool instead of calling `alarm()` which causes the next > `epoll()` call to use a timeout and call the cleanups if epoll turns > up empty > - or create a timerfd (timerfd_create(2)) in the beginning which we > add to the epoll context and use `timerfd_settime(2)` in place of > `alarm()`, which will also wake up the epoll call without having to add > timeouts to it > > `alarm()` is just such a gross interface... > In theory we'd also be able to ditch all of those `EINTR` loops as we > wouldn't be expecting any interrupts anymore... (and if we did expect > them, we could add a `signalfd(2)` to `epoll()` as well ;-) first one sounds much simpler but the second one sounds much more elegant ;) i'll see what works/feels better couldn't we also directly add a new timerfd for each client that needs such a timeout instead of managing some list ? the cleanupdata could go into the even.data.ptr and we wouldn't have to do anything periodically, just handle the timeout when epoll wakes up? we probably would have to merge the client and clenaupdata structs so that we can see which is which, but that should not be that of a problem? ^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [pve-devel] [PATCH qemu-server 1/3] qmeventd: rework 'forced_cleanup' handling and set timeout to 60s 2022-09-22 12:22 ` Dominik Csapak @ 2022-09-22 12:46 ` Wolfgang Bumiller 0 siblings, 0 replies; 13+ messages in thread From: Wolfgang Bumiller @ 2022-09-22 12:46 UTC (permalink / raw) To: Dominik Csapak; +Cc: Proxmox VE development discussion, Fabian Grünbichler On Thu, Sep 22, 2022 at 02:22:45PM +0200, Dominik Csapak wrote: > On 9/22/22 14:01, Wolfgang Bumiller wrote: > > On Thu, Sep 22, 2022 at 01:31:49PM +0200, Dominik Csapak wrote: > > > [snip] > > > > > -/* > > > > > - * SIGALRM and cleanup handling > > > > > - * > > > > > - * terminate_client will set an alarm for 5 seconds and add its client's PID to > > > > > - * the forced_cleanups list - when the timer expires, we iterate the list and > > > > > - * attempt to issue SIGKILL to all processes which haven't yet stopped. > > > > > - */ > > > > > - > > > > > -static void > > > > > -alarm_handler(__attribute__((unused)) int signum) > > > > > -{ > > > > > - alarm_triggered = 1; > > > > > -} > > > > > - > > > > > > > > wasn't this intentionally decoupled like this? > > > > > > > > alarm_handler just sets the flag > > > > actual force cleanup is conditionalized on the alarm having triggered, > > > > but the cleanup happens outside of the signal handler.. > > > > > > > > is there a reason from switching away from these scheme? we don't need > > > > to do the cleanup in the signal handler (timing is already plenty fuzzy > > > > anyway ;)) > > > > > > no real reason, i found the code somewhat cleaner, but you're right, > > > we probably want to keep that, and just trigger it regularly > > > > From what I can tell the only point of this signal is to interrupt > > `epoll()` after a while to call the cleanup/kill handler since we only > > have a single worker here that needs to do some work after a timeout. 
> > > > Why not either: > > - set a bool instead of calling `alarm()` which causes the next > > `epoll()` call to use a timeout and call the cleanups if epoll turns > > up empty > - or create a timerfd (timerfd_create(2)) in the beginning which we > > add to the epoll context and use `timerfd_settime(2)` in place of > > `alarm()`, which will also wake up the epoll call without having to add > > timeouts to it > > > > `alarm()` is just such a gross interface... > > In theory we'd also be able to ditch all of those `EINTR` loops as we > > wouldn't be expecting any interrupts anymore... (and if we did expect > > them, we could add a `signalfd(2)` to `epoll()` as well ;-) > > first one sounds much simpler but the second one sounds much more elegant ;) > i'll see what works/feels better > > couldn't we also directly add a new timerfd for each client that > needs such a timeout instead of managing some list ? I'm not really a fan of abusing kernel-side resources for a user-space scheduling problem ;-). If you want a per-client timeout, I'd much rather just have a list of points in time that epoll() should target. That list may well be `struct Client` itself if you want to merge the data as you described below. The `struct Client` could just receive a `STAILQ_ENTRY(Client) cleanup_entries;` member (queue(7), stailq(3)), and a cleanup time which is used to generate the timeout for `epoll()`, and on every wakeup, we can march through the already expired queue entries. > the cleanupdata could go into the even.data.ptr and we wouldn't > have to do anything periodically, just handle the timeout > when epoll wakes up? > > we probably would have to merge the client and clenaupdata structs > so that we can see which is which, but that should not be > that of a problem? ^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [pve-devel] [PATCH qemu-server 1/3] qmeventd: rework 'forced_cleanup' handling and set timeout to 60s 2022-09-21 12:49 ` [pve-devel] [PATCH qemu-server 1/3] qmeventd: rework 'forced_cleanup' handling and set timeout to 60s Dominik Csapak [not found] ` <<20220921124911.3224970-2-d.csapak@proxmox.com> @ 2022-09-22 11:51 ` Thomas Lamprecht 1 sibling, 0 replies; 13+ messages in thread From: Thomas Lamprecht @ 2022-09-22 11:51 UTC (permalink / raw) To: Proxmox VE development discussion, Dominik Csapak Am 21/09/2022 um 14:49 schrieb Dominik Csapak: > + fprintf(stderr, " -t timeout kill timeout (default: %ds)\n", DEFAULT_KILL_TIMEOUT); just a nit, as you probably send a v2 for Fabian's comments anyway: Please do s/timeout/<s>/ i.e.: -t <s> kill timeo.. That has two advantages, first the user actually knows the time unit and second, the context whitespace changes aren't required anymore. Did not checked the rest more closely, but this stuck out when skimming. ^ permalink raw reply [flat|nested] 13+ messages in thread
* [pve-devel] [PATCH qemu-server 2/3] qmeventd: cancel 'forced cleanup' when normal cleanup succeeds 2022-09-21 12:49 [pve-devel] [PATCH qemu-server 0/3] qmeventd: improve shutdown behaviour Dominik Csapak 2022-09-21 12:49 ` [pve-devel] [PATCH qemu-server 1/3] qmeventd: rework 'forced_cleanup' handling and set timeout to 60s Dominik Csapak @ 2022-09-21 12:49 ` Dominik Csapak 2022-09-22 10:14 ` Matthias Heiserer 2022-09-21 12:49 ` [pve-devel] [PATCH qemu-server 3/3] qmeventd: send QMP 'quit' command instead of SIGTERM Dominik Csapak 2 siblings, 1 reply; 13+ messages in thread From: Dominik Csapak @ 2022-09-21 12:49 UTC (permalink / raw) To: pve-devel instead of always sending a SIGKILL to the target pid. It was not that much of a problem since the timeout previously was 5 seconds and we used pifds where possible, thus the chance of killing the wrong process was rather slim. Now we increased the timeout to 60s which makes the race a bit more likely (when not using pidfds), so remove it from the 'forced_cleanups' list when the normal cleanup succeeds. 
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com> --- qmeventd/qmeventd.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/qmeventd/qmeventd.c b/qmeventd/qmeventd.c index e9ff5b3..de5efd0 100644 --- a/qmeventd/qmeventd.c +++ b/qmeventd/qmeventd.c @@ -415,6 +415,25 @@ cleanup_qemu_client(struct Client *client) } } +static void +remove_cleanup_data(void *ptr, void *client_ptr) { + struct CleanupData *data = (struct CleanupData *)ptr; + struct Client *client = (struct Client *)client_ptr; + + if (data->pid == client->pid) { + forced_cleanups = g_slist_remove(forced_cleanups, ptr); + free(ptr); + } +} + +static void +remove_from_forced_cleanup(struct Client *client) { + if (g_slist_length(forced_cleanups) > 0) { + VERBOSE_PRINT("removing %s from forced cleanups\n", client->qemu.vmid); + g_slist_foreach(forced_cleanups, remove_cleanup_data, client); + } +} + void cleanup_client(struct Client *client) { @@ -441,6 +460,7 @@ cleanup_client(struct Client *client) break; } + remove_from_forced_cleanup(client); free(client); } -- 2.30.2 ^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [pve-devel] [PATCH qemu-server 2/3] qmeventd: cancel 'forced cleanup' when normal cleanup succeeds 2022-09-21 12:49 ` [pve-devel] [PATCH qemu-server 2/3] qmeventd: cancel 'forced cleanup' when normal cleanup succeeds Dominik Csapak @ 2022-09-22 10:14 ` Matthias Heiserer 2022-09-22 11:37 ` Dominik Csapak 0 siblings, 1 reply; 13+ messages in thread From: Matthias Heiserer @ 2022-09-22 10:14 UTC (permalink / raw) To: Proxmox VE development discussion, Dominik Csapak On 21.09.2022 14:49, Dominik Csapak wrote: > instead of always sending a SIGKILL to the target pid. > It was not that much of a problem since the timeout previously was 5 > seconds and we used pifds where possible, thus the chance of killing the > wrong process was rather slim. > > Now we increased the timeout to 60s which makes the race a bit more likely > (when not using pidfds), so remove it from the 'forced_cleanups' list when > the normal cleanup succeeds. > > Signed-off-by: Dominik Csapak <d.csapak@proxmox.com> > --- > qmeventd/qmeventd.c | 20 ++++++++++++++++++++ > 1 file changed, 20 insertions(+) > > diff --git a/qmeventd/qmeventd.c b/qmeventd/qmeventd.c > index e9ff5b3..de5efd0 100644 > --- a/qmeventd/qmeventd.c > +++ b/qmeventd/qmeventd.c > @@ -415,6 +415,25 @@ cleanup_qemu_client(struct Client *client) > } > } > > +static void > +remove_cleanup_data(void *ptr, void *client_ptr) { Not that it really matters, but is there a reason we don't use remove_cleanup_data(struct CleanupData *ptr, struct Client *client_ptr) and let the caller deal with types? 
> + struct CleanupData *data = (struct CleanupData *)ptr; > + struct Client *client = (struct Client *)client_ptr; > + > + if (data->pid == client->pid) { > + forced_cleanups = g_slist_remove(forced_cleanups, ptr); > + free(ptr); > + } > +} > + > +static void > +remove_from_forced_cleanup(struct Client *client) { > + if (g_slist_length(forced_cleanups) > 0) { > + VERBOSE_PRINT("removing %s from forced cleanups\n", client->qemu.vmid); > + g_slist_foreach(forced_cleanups, remove_cleanup_data, client); that is, here `(void (*)(void*, void*)) remove_cleanup_data`. Seems a bit cleaner to me. > + } > +} > + > void > cleanup_client(struct Client *client) > { > @@ -441,6 +460,7 @@ cleanup_client(struct Client *client) > break; > } > > + remove_from_forced_cleanup(client); > free(client); > } > ^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [pve-devel] [PATCH qemu-server 2/3] qmeventd: cancel 'forced cleanup' when normal cleanup succeeds 2022-09-22 10:14 ` Matthias Heiserer @ 2022-09-22 11:37 ` Dominik Csapak 2022-09-23 7:58 ` Wolfgang Bumiller 0 siblings, 1 reply; 13+ messages in thread From: Dominik Csapak @ 2022-09-22 11:37 UTC (permalink / raw) To: Matthias Heiserer, Proxmox VE development discussion On 9/22/22 12:14, Matthias Heiserer wrote: > On 21.09.2022 14:49, Dominik Csapak wrote: >> instead of always sending a SIGKILL to the target pid. >> It was not that much of a problem since the timeout previously was 5 >> seconds and we used pifds where possible, thus the chance of killing the >> wrong process was rather slim. >> >> Now we increased the timeout to 60s which makes the race a bit more likely >> (when not using pidfds), so remove it from the 'forced_cleanups' list when >> the normal cleanup succeeds. >> >> Signed-off-by: Dominik Csapak <d.csapak@proxmox.com> >> --- >> qmeventd/qmeventd.c | 20 ++++++++++++++++++++ >> 1 file changed, 20 insertions(+) >> >> diff --git a/qmeventd/qmeventd.c b/qmeventd/qmeventd.c >> index e9ff5b3..de5efd0 100644 >> --- a/qmeventd/qmeventd.c >> +++ b/qmeventd/qmeventd.c >> @@ -415,6 +415,25 @@ cleanup_qemu_client(struct Client *client) >> } >> } >> +static void >> +remove_cleanup_data(void *ptr, void *client_ptr) { > Not that it really matters, but is there a reason we don't use > remove_cleanup_data(struct CleanupData *ptr, struct Client *client_ptr) > and let the caller deal with types? 
>> + struct CleanupData *data = (struct CleanupData *)ptr; >> + struct Client *client = (struct Client *)client_ptr; >> + >> + if (data->pid == client->pid) { >> + forced_cleanups = g_slist_remove(forced_cleanups, ptr); >> + free(ptr); >> + } >> +} >> + > +static void >> +remove_from_forced_cleanup(struct Client *client) { >> + if (g_slist_length(forced_cleanups) > 0) { >> + VERBOSE_PRINT("removing %s from forced cleanups\n", client->qemu.vmid); >> + g_slist_foreach(forced_cleanups, remove_cleanup_data, client); > that is, here `(void (*)(void*, void*)) remove_cleanup_data`. Seems a bit cleaner to me. >> + } >> +} >> + >> void >> cleanup_client(struct Client *client) >> { >> @@ -441,6 +460,7 @@ cleanup_client(struct Client *client) >> break; >> } >> + remove_from_forced_cleanup(client); >> free(client); >> } > i just kept the style we use for the existing call to *_foreach. my guess is that the intention was to keep the function close to what glib defines (although that uses 'gpointer'). doing as you suggested introduces a big cast that is confusing to read IMHO (for people not that familiar with c at least ;) ) that could be solved with casting to 'GFunc' (not sure if that's considered good style?) but in the end, i don't have strong feeling either way ^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [pve-devel] [PATCH qemu-server 2/3] qmeventd: cancel 'forced cleanup' when normal cleanup succeeds 2022-09-22 11:37 ` Dominik Csapak @ 2022-09-23 7:58 ` Wolfgang Bumiller 0 siblings, 0 replies; 13+ messages in thread From: Wolfgang Bumiller @ 2022-09-23 7:58 UTC (permalink / raw) To: Dominik Csapak; +Cc: Matthias Heiserer, Proxmox VE development discussion On Thu, Sep 22, 2022 at 01:37:57PM +0200, Dominik Csapak wrote: > On 9/22/22 12:14, Matthias Heiserer wrote: > > On 21.09.2022 14:49, Dominik Csapak wrote: > > > instead of always sending a SIGKILL to the target pid. > > > It was not that much of a problem since the timeout previously was 5 > > > seconds and we used pidfds where possible, thus the chance of killing the > > > wrong process was rather slim. > > > > > > Now we increased the timeout to 60s which makes the race a bit more likely > > > (when not using pidfds), so remove it from the 'forced_cleanups' list when > > > the normal cleanup succeeds. > > > > > > Signed-off-by: Dominik Csapak <d.csapak@proxmox.com> > > > --- > > > qmeventd/qmeventd.c | 20 ++++++++++++++++++++ > > > 1 file changed, 20 insertions(+) > > > > > > diff --git a/qmeventd/qmeventd.c b/qmeventd/qmeventd.c > > > index e9ff5b3..de5efd0 100644 > > > --- a/qmeventd/qmeventd.c > > > +++ b/qmeventd/qmeventd.c > > > @@ -415,6 +415,25 @@ cleanup_qemu_client(struct Client *client) > > > } > > > } > > > +static void > > > +remove_cleanup_data(void *ptr, void *client_ptr) { > > Not that it really matters, but is there a reason we don't use > > remove_cleanup_data(struct CleanupData *ptr, struct Client *client_ptr) > > and let the caller deal with types? 
> > > + struct CleanupData *data = (struct CleanupData *)ptr; > > > + struct Client *client = (struct Client *)client_ptr; > > > + > > > + if (data->pid == client->pid) { > > > + forced_cleanups = g_slist_remove(forced_cleanups, ptr); > > > + free(ptr); > > > + } > > > +} > > > + > +static void > > > +remove_from_forced_cleanup(struct Client *client) { > > > + if (g_slist_length(forced_cleanups) > 0) { > > > + VERBOSE_PRINT("removing %s from forced cleanups\n", client->qemu.vmid); > > > + g_slist_foreach(forced_cleanups, remove_cleanup_data, client); > > that is, here `(void (*)(void*, void*)) remove_cleanup_data`. Seems a bit cleaner to me. > > > + } > > > +} > > > + > > > void > > > cleanup_client(struct Client *client) > > > { > > > @@ -441,6 +460,7 @@ cleanup_client(struct Client *client) > > > break; > > > } > > > + remove_from_forced_cleanup(client); > > > free(client); > > > } > > > > i just kept the style we use for the existing call to *_foreach. > > my guess is that the intention was to keep the function close to what glib defines > (although that uses 'gpointer'). doing as you suggested introduces a big > cast that is confusing to read IMHO (for people not that familiar with c at least ;) ) > that could be solved with casting to 'GFunc' (not sure if that's considered good style?) > but in the end, i don't have strong feeling either way Just to follow this up: The main argument I can give for the current style is that a cast works for any function signature and therefor removes one possible compile-time check. Sure, you can mess up the parameter cast in the function body, but that's arguably less likely. Also, since they're usually `void*` you wouldn't actually *need* to repeat the type in the function body: -struct CleanupData *data = (struct CleanupData *)ptr; +struct CleanupData *data = ptr; is actually sufficient in C. ^ permalink raw reply [flat|nested] 13+ messages in thread
* [pve-devel] [PATCH qemu-server 3/3] qmeventd: send QMP 'quit' command instead of SIGTERM 2022-09-21 12:49 [pve-devel] [PATCH qemu-server 0/3] qmeventd: improve shutdown behaviour Dominik Csapak 2022-09-21 12:49 ` [pve-devel] [PATCH qemu-server 1/3] qmeventd: rework 'forced_cleanup' handling and set timeout to 60s Dominik Csapak 2022-09-21 12:49 ` [pve-devel] [PATCH qemu-server 2/3] qmeventd: cancel 'forced cleanup' when normal cleanup succeeds Dominik Csapak @ 2022-09-21 12:49 ` Dominik Csapak 2 siblings, 0 replies; 13+ messages in thread From: Dominik Csapak @ 2022-09-21 12:49 UTC (permalink / raw) To: pve-devel this is functionally the same, but sending SIGTERM has the ugly side effect of printing the following to the log: > QEMU[<pid>]: kvm: terminating on signal 15 from pid <pid> (/usr/sbin/qmeventd) while sending a QMP quit command does not. Signed-off-by: Dominik Csapak <d.csapak@proxmox.com> --- qmeventd/qmeventd.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/qmeventd/qmeventd.c b/qmeventd/qmeventd.c index de5efd0..3793414 100644 --- a/qmeventd/qmeventd.c +++ b/qmeventd/qmeventd.c @@ -286,8 +286,10 @@ handle_qmp_return(struct Client *client, struct json_object *data, bool error) VERBOSE_PRINT("%s: QMP handshake complete\n", client->qemu.vmid); break; - case STATE_IDLE: + // we expect an empty return object after sending quit case STATE_TERMINATING: + break; + case STATE_IDLE: VERBOSE_PRINT("%s: spurious return value received\n", client->qemu.vmid); break; @@ -491,8 +493,14 @@ terminate_client(struct Client *client) } } - int err = kill(client->pid, SIGTERM); - log_neg(err, "kill"); + // try to send a 'quit' command first, fallback to SIGTERM of the pid + static const char qmp_quit_command[] = "{\"execute\":\"quit\"}\n"; + VERBOSE_PRINT("%s: sending 'quit' via QMP\n", client->qemu.vmid); + if (!must_write(client->fd, qmp_quit_command, sizeof(qmp_quit_command) - 1)) { + VERBOSE_PRINT("%s: sending 'SIGTERM' to pid %d\n", 
client->qemu.vmid, client->pid); + int err = kill(client->pid, SIGTERM); + log_neg(err, "kill"); + } time_t timeout = time(NULL) + kill_timeout; -- 2.30.2 ^ permalink raw reply [flat|nested] 13+ messages in thread
end of thread, other threads:[~2022-09-23 7:58 UTC | newest] Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed) -- links below jump to the message on this page -- 2022-09-21 12:49 [pve-devel] [PATCH qemu-server 0/3] qmeventd: improve shutdown behaviour Dominik Csapak 2022-09-21 12:49 ` [pve-devel] [PATCH qemu-server 1/3] qmeventd: rework 'forced_cleanup' handling and set timeout to 60s Dominik Csapak [not found] ` <<20220921124911.3224970-2-d.csapak@proxmox.com> 2022-09-22 8:24 ` Fabian Grünbichler 2022-09-22 11:31 ` Dominik Csapak 2022-09-22 12:01 ` Wolfgang Bumiller 2022-09-22 12:22 ` Dominik Csapak 2022-09-22 12:46 ` Wolfgang Bumiller 2022-09-22 11:51 ` Thomas Lamprecht 2022-09-21 12:49 ` [pve-devel] [PATCH qemu-server 2/3] qmeventd: cancel 'forced cleanup' when normal cleanup succeeds Dominik Csapak 2022-09-22 10:14 ` Matthias Heiserer 2022-09-22 11:37 ` Dominik Csapak 2022-09-23 7:58 ` Wolfgang Bumiller 2022-09-21 12:49 ` [pve-devel] [PATCH qemu-server 3/3] qmeventd: send QMP 'quit' command instead of SIGTERM Dominik Csapak
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox