From: Florian Forster
Date: Tue, 28 Nov 2023 18:31:11 +0000 (+0100)
Subject: procevent plugin: remove use of a nested flexible array member.
X-Git-Tag: 6.0.0-rc0~42^2~9
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=d5eeca856ceece386a9143993bc29d98925f6489;p=thirdparty%2Fcollectd.git

procevent plugin: remove use of a nested flexible array member.

The previous code used an ad-hoc struct to construct or parse a Netlink
message. This relied on declaring a field _after_ struct cn_msg, which
ends in a flexible array member; nesting such a struct inside another
struct is prohibited by the C standard and leads to compiler warnings.
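For context, a minimal sketch of the rejected pattern, using hypothetical
types rather than the plugin's own:

    struct packet {
      unsigned int len;
      unsigned char payload[]; /* flexible array member (FAM) */
    };

    struct framed {
      struct packet pkt; /* C11 6.7.2.1p3: a struct whose last member is a
                          * FAM shall not be a member of another struct */
      int trailer;       /* storage would land "after" the FAM; GCC and
                          * Clang accept this only as an extension */
    };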
---

diff --git a/src/procevent.c b/src/procevent.c
index 03d9081f3..7556b42e1 100644
--- a/src/procevent.c
+++ b/src/procevent.c
@@ -687,45 +687,49 @@ static int nl_connect() {
 }
 
 static int set_proc_ev_listen(bool enable) {
-  struct __attribute__((aligned(NLMSG_ALIGNTO))) {
-    struct nlmsghdr nl_hdr;
-    struct __attribute__((__packed__)) {
-      struct cn_msg cn_msg;
-      enum proc_cn_mcast_op cn_mcast;
-    };
-  } nlcn_msg;
-
-  memset(&nlcn_msg, 0, sizeof(nlcn_msg));
-  nlcn_msg.nl_hdr.nlmsg_len = sizeof(nlcn_msg);
-  nlcn_msg.nl_hdr.nlmsg_pid = getpid();
-  nlcn_msg.nl_hdr.nlmsg_type = NLMSG_DONE;
-
-  nlcn_msg.cn_msg.id.idx = CN_IDX_PROC;
-  nlcn_msg.cn_msg.id.val = CN_VAL_PROC;
-  nlcn_msg.cn_msg.len = sizeof(enum proc_cn_mcast_op);
-
-  nlcn_msg.cn_mcast = enable ? PROC_CN_MCAST_LISTEN : PROC_CN_MCAST_IGNORE;
-
-  int rc = send(nl_sock, &nlcn_msg, sizeof(nlcn_msg), 0);
+#define MSG_SIZE \
+  sizeof(struct nlmsghdr) + sizeof(struct cn_msg) + \
+  sizeof(enum proc_cn_mcast_op)
+  uint8_t msg[MSG_SIZE] = {0};
+  size_t offset = 0;
+
+  memcpy(&msg[offset],
+         &(struct nlmsghdr){
+             .nlmsg_len = MSG_SIZE,
+             .nlmsg_pid = getpid(),
+             .nlmsg_type = NLMSG_DONE,
+         },
+         sizeof(struct nlmsghdr));
+  offset += sizeof(struct nlmsghdr);
+
+  memcpy(&msg[offset],
+         &(struct cn_msg){
+             .id.idx = CN_IDX_PROC,
+             .id.val = CN_VAL_PROC,
+             .len = sizeof(enum proc_cn_mcast_op),
+         },
+         sizeof(struct cn_msg));
+  offset += sizeof(struct cn_msg);
+
+  memcpy(&msg[offset],
+         &(enum proc_cn_mcast_op){enable ? PROC_CN_MCAST_LISTEN
+                                         : PROC_CN_MCAST_IGNORE},
+         sizeof(enum proc_cn_mcast_op));
+
+  int rc = send(nl_sock, msg, MSG_SIZE, 0);
   if (rc == -1) {
-    ERROR("procevent plugin: subscribing to netlink process events failed: %d",
-          errno);
+    ERROR("procevent plugin: subscribing to netlink process events failed: %s",
+          STRERRNO);
     return -1;
   }
 
   return 0;
+#undef MSG_SIZE
 }
 
 // Read from netlink socket and write to ring buffer
 static int read_event() {
   int recv_flags = MSG_DONTWAIT;
-  struct __attribute__((aligned(NLMSG_ALIGNTO))) {
-    struct nlmsghdr nl_hdr;
-    struct __attribute__((__packed__)) {
-      struct cn_msg cn_msg;
-      struct proc_event proc_ev;
-    };
-  } nlcn_msg;
 
   if (nl_sock == -1)
     return 0;
@@ -740,11 +744,18 @@ static int read_event() {
 
     pthread_mutex_unlock(&procevent_thread_lock);
 
-    int status = recv(nl_sock, &nlcn_msg, sizeof(nlcn_msg), recv_flags);
+    size_t msg_size = (size_t)sysconf(_SC_PAGESIZE);
+    if (msg_size < 8192) {
+      msg_size = 8192;
+    }
+    uint8_t msg[msg_size];
+    memset(msg, 0, msg_size);
+    int status = recv(nl_sock, msg, msg_size, recv_flags);
 
     if (status == 0) {
       return 0;
-    } else if (status < 0) {
+    }
+    if (status < 0) {
       if (errno == EAGAIN || errno == EWOULDBLOCK) {
         pthread_mutex_lock(&procevent_data_lock);
 
@@ -769,22 +780,39 @@
         continue;
       }
     }
+    msg_size = (size_t)status;
+    size_t expected_size = sizeof(struct nlmsghdr) + sizeof(struct cn_msg) +
+                           sizeof(struct proc_event);
+    if (msg_size < expected_size) {
+      ERROR("procevent plugin: received %zu bytes, expected %zu", msg_size,
+            expected_size);
+      return -EPROTO;
+    }
+    if (msg_size > expected_size) {
+      DEBUG("procevent plugin: received %zu bytes, expected %zu. This may "
+            "indicate that we're receiving multiple responses per datagram, "
+            "leading to dropped metrics.",
+            msg_size, expected_size);
+    }
 
     // We successfully received a message, so don't block on the next
     // read in case there are more (and if there aren't, it will be
     // handled above in the EWOULDBLOCK error-checking)
     recv_flags = MSG_DONTWAIT;
 
+    struct nlmsghdr *nl_hdr = (struct nlmsghdr *)&msg[0];
+    struct cn_msg *cn_hdr = (struct cn_msg *)NLMSG_DATA(nl_hdr);
+    struct proc_event *proc_ev = (struct proc_event *)&cn_hdr->data[0];
+
     int proc_id = -1;
     int proc_status = -1;
-
-    switch (nlcn_msg.proc_ev.what) {
+    switch (proc_ev->what) {
    case PROC_EVENT_EXEC:
      proc_status = PROCEVENT_STARTED;
-      proc_id = nlcn_msg.proc_ev.event_data.exec.process_pid;
+      proc_id = proc_ev->event_data.exec.process_pid;
      break;
    case PROC_EVENT_EXIT:
-      proc_id = nlcn_msg.proc_ev.event_data.exit.process_pid;
+      proc_id = proc_ev->event_data.exit.process_pid;
      proc_status = PROCEVENT_EXITED;
      break;
    default:
@@ -792,41 +820,42 @@
      break;
    }
 
+    if (proc_status == -1) {
+      continue;
+    }
+
     // If we're interested in this process status event, place the event
     // in the ring buffer for consumption by the dequeue (dispatch) thread.
+    pthread_mutex_lock(&procevent_data_lock);
 
-    if (proc_status != -1) {
-      pthread_mutex_lock(&procevent_data_lock);
-
-      int next = ring.head + 1;
-      if (next >= ring.maxLen)
-        next = 0;
-
-      if (next == ring.tail) {
-        // Buffer is full, signal the dequeue thread to process the buffer
-        // and clean it out, and then sleep
-        WARNING("procevent plugin: ring buffer full");
+    int next = ring.head + 1;
+    if (next >= ring.maxLen)
+      next = 0;
 
-        pthread_cond_signal(&procevent_cond);
-        pthread_mutex_unlock(&procevent_data_lock);
+    if (next == ring.tail) {
+      // Buffer is full, signal the dequeue thread to process the buffer
+      // and clean it out, and then sleep
+      WARNING("procevent plugin: ring buffer full");
 
-        usleep(1000);
-        continue;
-      } else {
-        DEBUG("procevent plugin: Process %d status is now %s at %llu", proc_id,
-              (proc_status == PROCEVENT_EXITED ? "EXITED" : "STARTED"),
-              (unsigned long long)cdtime());
+      pthread_cond_signal(&procevent_cond);
+      pthread_mutex_unlock(&procevent_data_lock);
 
-        ring.buffer[ring.head][RBUF_PROC_ID_INDEX] = proc_id;
-        ring.buffer[ring.head][RBUF_PROC_STATUS_INDEX] = proc_status;
-        ring.buffer[ring.head][RBUF_TIME_INDEX] = cdtime();
+      usleep(1000);
+      continue;
+    } else {
+      DEBUG("procevent plugin: Process %d status is now %s at %llu", proc_id,
+            (proc_status == PROCEVENT_EXITED ? "EXITED" : "STARTED"),
+            (unsigned long long)cdtime());
 
-        ring.head = next;
-      }
+      ring.buffer[ring.head][RBUF_PROC_ID_INDEX] = proc_id;
+      ring.buffer[ring.head][RBUF_PROC_STATUS_INDEX] = proc_status;
+      ring.buffer[ring.head][RBUF_TIME_INDEX] = cdtime();
 
-      pthread_mutex_unlock(&procevent_data_lock);
+      ring.head = next;
     }
-  }
+
+    pthread_mutex_unlock(&procevent_data_lock);
+  } // while true
 
   return 0;
 }
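For readers new to the serialization pattern the patch adopts, here is a
minimal standalone sketch of field-by-field packing with compound literals;
the types and values are hypothetical and independent of the code above:

    #include <stdint.h>
    #include <string.h>

    struct hdr {
      uint32_t len;
      uint16_t type;
    };

    /* Build a message as a flat byte buffer, copying each field in turn,
     * instead of overlaying a nested struct on the wire format. */
    static void pack_message(uint8_t *buf, uint16_t op) {
      size_t offset = 0;

      memcpy(&buf[offset],
             &(struct hdr){
                 .len = sizeof(struct hdr) + sizeof(uint16_t),
                 .type = 1,
             },
             sizeof(struct hdr));
      offset += sizeof(struct hdr);

      memcpy(&buf[offset], &op, sizeof(op));
    }

The memcpy-based approach trades the convenience of a struct overlay for a
layout that never runs into the flexible-array-member constraint, at the
cost of explicit offset bookkeeping.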