// SPDX-License-Identifier: GPL-2.0-or-later

/* PASST - Plug A Simple Socket Transport
 *  for qemu/UNIX domain socket mode
 *
 * PASTA - Pack A Subtle Tap Abstraction
 *  for network namespace/tap device mode
 *
 * tcp_buf.c - TCP L2 buffer management functions
 *
 * Copyright Red Hat
 * Author: Stefano Brivio <sbrivio@redhat.com>
 */

#include <stddef.h>
#include <stdint.h>
#include <limits.h>
#include <string.h>
#include <errno.h>

#include <netinet/ip.h>
#include <netinet/tcp.h>

#include "util.h"
#include "ip.h"
#include "iov.h"
#include "passt.h"
#include "tap.h"
#include "siphash.h"
#include "inany.h"
#include "tcp_conn.h"
#include "tcp_internal.h"
#include "tcp_buf.h"

#define TCP_FRAMES_MEM			128
#define TCP_FRAMES						\
	(c->mode == MODE_PASTA ? 1 : TCP_FRAMES_MEM)
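
/* Presumably because pasta writes frames to the tap device one at a
 * time, there is nothing to gain from queueing more than one frame per
 * batch in that mode; in passt mode, up to TCP_FRAMES_MEM frames are
 * sent out in a single batch.
 */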

/* Static buffers */

/* Ethernet header for IPv4 and IPv6 frames */
static struct ethhdr		tcp4_eth_src;
static struct ethhdr		tcp6_eth_src;

static struct tap_hdr		tcp_payload_tap_hdr[TCP_FRAMES_MEM];

/* IP headers for IPv4 and IPv6 */
struct iphdr		tcp4_payload_ip[TCP_FRAMES_MEM];
struct ipv6hdr		tcp6_payload_ip[TCP_FRAMES_MEM];

/* TCP segments with payload for IPv4 and IPv6 frames */
static struct tcp_payload_t	tcp_payload[TCP_FRAMES_MEM];

static_assert(MSS4 <= sizeof(tcp_payload[0].data), "MSS4 is greater than 65516");
static_assert(MSS6 <= sizeof(tcp_payload[0].data), "MSS6 is greater than 65516");

/* References tracking the owner connection of frames in the tap outqueue */
static struct tcp_tap_conn *tcp_frame_conns[TCP_FRAMES_MEM];
static unsigned int tcp_payload_used;

/* recvmsg()/sendmsg() data for tap */
static struct iovec	iov_sock		[TCP_FRAMES_MEM + 1];

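/* Each queued frame is described by a scatter-gather array of
 * TCP_NUM_IOVS vectors, one per layer:
 *
 *   [TCP_IOV_TAP]     mode-specific transport header
 *   [TCP_IOV_ETH]     Ethernet header
 *   [TCP_IOV_IP]      IPv4 or IPv6 header
 *   [TCP_IOV_PAYLOAD] TCP header, options, and payload
 */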
static struct iovec	tcp_l2_iov[TCP_FRAMES_MEM][TCP_NUM_IOVS];

/**
 * tcp_update_l2_buf() - Update Ethernet header buffers with addresses
 * @eth_d:	Ethernet destination address, NULL if unchanged
 * @eth_s:	Ethernet source address, NULL if unchanged
 */
void tcp_update_l2_buf(const unsigned char *eth_d, const unsigned char *eth_s)
{
	eth_update_mac(&tcp4_eth_src, eth_d, eth_s);
	eth_update_mac(&tcp6_eth_src, eth_d, eth_s);
}

/**
 * tcp_sock_iov_init() - Initialise scatter-gather L2 buffers for IPv4 and IPv6 sockets
 * @c:		Execution context
 */
void tcp_sock_iov_init(const struct ctx *c)
{
	struct ipv6hdr ip6 = L2_BUF_IP6_INIT(IPPROTO_TCP);
	struct iphdr iph = L2_BUF_IP4_INIT(IPPROTO_TCP);
	int i;

	tcp6_eth_src.h_proto = htons_constant(ETH_P_IPV6);
	tcp4_eth_src.h_proto = htons_constant(ETH_P_IP);

	for (i = 0; i < ARRAY_SIZE(tcp_payload); i++) {
		tcp6_payload_ip[i] = ip6;
		tcp4_payload_ip[i] = iph;
	}

	for (i = 0; i < TCP_FRAMES_MEM; i++) {
		struct iovec *iov = tcp_l2_iov[i];

		iov[TCP_IOV_TAP] = tap_hdr_iov(c, &tcp_payload_tap_hdr[i]);
		iov[TCP_IOV_ETH].iov_len = sizeof(struct ethhdr);
		iov[TCP_IOV_PAYLOAD].iov_base = &tcp_payload[i];
	}
}

/**
 * tcp_revert_seq() - Revert affected conn->seq_to_tap after failed transmission
 * @c:		Execution context
 * @conns:	Array of connection pointers corresponding to queued frames
 * @frames:	Two-dimensional array containing queued frames with sub-iovs
 * @num_frames:	Number of entries in the two arrays to be compared
 */
static void tcp_revert_seq(const struct ctx *c, struct tcp_tap_conn **conns,
			   struct iovec (*frames)[TCP_NUM_IOVS], int num_frames)
{
	int i;

	for (i = 0; i < num_frames; i++) {
		const struct tcphdr *th = frames[i][TCP_IOV_PAYLOAD].iov_base;
		struct tcp_tap_conn *conn = conns[i];
		uint32_t seq = ntohl(th->seq);
		uint32_t peek_offset;

		if (SEQ_LE(conn->seq_to_tap, seq))
			continue;

		conn->seq_to_tap = seq;
		peek_offset = conn->seq_to_tap - conn->seq_ack_from_tap;
		if (tcp_set_peek_offset(conn->sock, peek_offset))
			tcp_rst(c, conn);
	}
}

/**
 * tcp_payload_flush() - Send out buffers for segments with data or flags
 * @c:		Execution context
 */
void tcp_payload_flush(const struct ctx *c)
{
	size_t m;

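	/* tap_send_frames() returns the number of frames it managed to
	 * send: on a short count, frames from index m onwards never made
	 * it to the tap, and the owning connections' seq_to_tap values
	 * have to be rewound so the data is sent again later.
	 */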
	m = tap_send_frames(c, &tcp_l2_iov[0][0], TCP_NUM_IOVS,
			    tcp_payload_used);
	if (m != tcp_payload_used) {
		tcp_revert_seq(c, &tcp_frame_conns[m], &tcp_l2_iov[m],
			       tcp_payload_used - m);
	}
	tcp_payload_used = 0;
}

/**
 * tcp_l2_buf_fill_headers() - Fill 802.3, IP, TCP headers in pre-cooked buffers
 * @conn:	Connection pointer
 * @iov:	Pointer to an array of iovec of TCP pre-cooked buffers
 * @check:	Checksum, if already known
 * @seq:	Sequence number for this segment
 * @no_tcp_csum: Do not set TCP checksum
 */
static void tcp_l2_buf_fill_headers(const struct tcp_tap_conn *conn,
				    struct iovec *iov, const uint16_t *check,
				    uint32_t seq, bool no_tcp_csum)
{
	struct iov_tail tail = IOV_TAIL(&iov[TCP_IOV_PAYLOAD], 1, 0);
	struct tcphdr *th = IOV_REMOVE_HEADER(&tail, struct tcphdr);
	struct tap_hdr *taph = iov[TCP_IOV_TAP].iov_base;
	const struct flowside *tapside = TAPFLOW(conn);
	const struct in_addr *a4 = inany_v4(&tapside->oaddr);
	struct ipv6hdr *ip6h = NULL;
	struct iphdr *ip4h = NULL;

	if (a4)
		ip4h = iov[TCP_IOV_IP].iov_base;
	else
		ip6h = iov[TCP_IOV_IP].iov_base;

	tcp_fill_headers(conn, taph, ip4h, ip6h, th, &tail,
			 check, seq, no_tcp_csum);
}

/**
 * tcp_buf_send_flag() - Send segment with flags to tap (no payload)
 * @c:		Execution context
 * @conn:	Connection pointer
 * @flags:	TCP flags: if not set, send segment only if ACK is due
 *
 * Return: negative error code on connection reset, 0 otherwise
 */
int tcp_buf_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags)
{
	struct tcp_payload_t *payload;
	struct iovec *iov;
	size_t optlen;
	size_t l4len;
	uint32_t seq;
	int ret;

	iov = tcp_l2_iov[tcp_payload_used];
	if (CONN_V4(conn)) {
		iov[TCP_IOV_IP] = IOV_OF_LVALUE(tcp4_payload_ip[tcp_payload_used]);
		iov[TCP_IOV_ETH].iov_base = &tcp4_eth_src;
	} else {
		iov[TCP_IOV_IP] = IOV_OF_LVALUE(tcp6_payload_ip[tcp_payload_used]);
		iov[TCP_IOV_ETH].iov_base = &tcp6_eth_src;
	}

	payload = iov[TCP_IOV_PAYLOAD].iov_base;
	seq = conn->seq_to_tap;
	ret = tcp_prepare_flags(c, conn, flags, &payload->th,
				(struct tcp_syn_opts *)&payload->data, &optlen);
	if (ret <= 0)
		return ret;

	tcp_payload_used++;
	l4len = optlen + sizeof(struct tcphdr);
	iov[TCP_IOV_PAYLOAD].iov_len = l4len;
	tcp_l2_buf_fill_headers(conn, iov, NULL, seq, false);

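	/* Duplicate ACK: queue an identical frame right next to the one
	 * just prepared. Tap and payload buffers are copied, while the
	 * Ethernet and IP vectors can simply reference the originals,
	 * as both frames go out in the same batch.
	 */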
	if (flags & DUP_ACK) {
		struct iovec *dup_iov = tcp_l2_iov[tcp_payload_used++];

		memcpy(dup_iov[TCP_IOV_TAP].iov_base, iov[TCP_IOV_TAP].iov_base,
		       iov[TCP_IOV_TAP].iov_len);
		dup_iov[TCP_IOV_ETH].iov_base = iov[TCP_IOV_ETH].iov_base;
		dup_iov[TCP_IOV_IP] = iov[TCP_IOV_IP];
		memcpy(dup_iov[TCP_IOV_PAYLOAD].iov_base,
		       iov[TCP_IOV_PAYLOAD].iov_base, l4len);
		dup_iov[TCP_IOV_PAYLOAD].iov_len = l4len;
	}

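	/* Leave room for two more frames: the next caller might need to
	 * queue a flag segment together with its duplicate ACK.
	 */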
	if (tcp_payload_used > TCP_FRAMES_MEM - 2)
		tcp_payload_flush(c);

	return 0;
}

/**
 * tcp_data_to_tap() - Finalise (queue) highest-numbered scatter-gather buffer
 * @c:		Execution context
 * @conn:	Connection pointer
 * @dlen:	TCP payload length
 * @no_csum:	Don't compute IPv4 checksum, use the one from previous buffer
 * @seq:	Sequence number to be sent
 */
static void tcp_data_to_tap(const struct ctx *c, struct tcp_tap_conn *conn,
			    ssize_t dlen, int no_csum, uint32_t seq)
{
	struct tcp_payload_t *payload;
	const uint16_t *check = NULL;
	struct iovec *iov;

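	/* seq_to_tap is advanced before the frame is actually sent: if
	 * transmission fails, tcp_revert_seq() rolls it back.
	 */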
	conn->seq_to_tap = seq + dlen;
	tcp_frame_conns[tcp_payload_used] = conn;
	iov = tcp_l2_iov[tcp_payload_used];
	if (CONN_V4(conn)) {
		if (no_csum) {
			struct iovec *iov_prev = tcp_l2_iov[tcp_payload_used - 1];
			struct iphdr *iph = iov_prev[TCP_IOV_IP].iov_base;

			check = &iph->check;
		}
		iov[TCP_IOV_IP] = IOV_OF_LVALUE(tcp4_payload_ip[tcp_payload_used]);
		iov[TCP_IOV_ETH].iov_base = &tcp4_eth_src;
	} else if (CONN_V6(conn)) {
		iov[TCP_IOV_IP] = IOV_OF_LVALUE(tcp6_payload_ip[tcp_payload_used]);
		iov[TCP_IOV_ETH].iov_base = &tcp6_eth_src;
	}

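	/* Data segments carry no TCP options: plain 20-byte header
	 * (five 32-bit words), with just ACK set.
	 */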
	payload = iov[TCP_IOV_PAYLOAD].iov_base;
	payload->th.th_off = sizeof(struct tcphdr) / 4;
	payload->th.th_x2 = 0;
	payload->th.th_flags = 0;
	payload->th.ack = 1;
	iov[TCP_IOV_PAYLOAD].iov_len = dlen + sizeof(struct tcphdr);
	tcp_l2_buf_fill_headers(conn, iov, check, seq, false);
	if (++tcp_payload_used > TCP_FRAMES_MEM - 1)
		tcp_payload_flush(c);
}

/**
 * tcp_buf_data_from_sock() - Handle new data from socket, queue to tap, in window
 * @c:		Execution context
 * @conn:	Connection pointer
 *
 * Return: negative on connection reset, 0 otherwise
 *
 * #syscalls recvmsg
 */
int tcp_buf_data_from_sock(const struct ctx *c, struct tcp_tap_conn *conn)
{
	uint32_t wnd_scaled = conn->wnd_from_tap << conn->ws_from_tap;
	int fill_bufs, send_bufs = 0, last_len, iov_rem = 0;
	int len, dlen, i, s = conn->sock;
	struct msghdr mh_sock = { 0 };
	uint16_t mss = MSS_GET(conn);
	uint32_t already_sent, seq;
	struct iovec *iov;

	/* How much have we read/sent since the last received ACK? */
	already_sent = conn->seq_to_tap - conn->seq_ack_from_tap;

	if (SEQ_LT(already_sent, 0)) {
		/* RFC 761, section 2.1. */
		flow_trace(conn, "ACK sequence gap: ACK for %u, sent: %u",
			   conn->seq_ack_from_tap, conn->seq_to_tap);
		conn->seq_to_tap = conn->seq_ack_from_tap;
		already_sent = 0;
		if (tcp_set_peek_offset(s, 0)) {
			tcp_rst(c, conn);
			return -1;
		}
	}

	if (!wnd_scaled || already_sent >= wnd_scaled) {
		/* We filled the window: all we're waiting for is an ACK
		 * from the guest. Keeping EPOLLIN enabled would only
		 * bring storms of wake-ups for data we can't dequeue
		 * yet, so mask EPOLLIN altogether (ACK_FROM_TAP_BLOCKS)
		 * until the guest acknowledges something.
		 */
		conn_flag(c, conn, ACK_FROM_TAP_BLOCKS);
		conn_flag(c, conn, STALLED);
		conn_flag(c, conn, ACK_FROM_TAP_DUE);
		return 0;
	}

	/* Set up buffer descriptors we'll fill completely and partially. */
	fill_bufs = DIV_ROUND_UP(wnd_scaled - already_sent, mss);
	if (fill_bufs > TCP_FRAMES) {
		fill_bufs = TCP_FRAMES;
		iov_rem = 0;
	} else {
		iov_rem = (wnd_scaled - already_sent) % mss;
	}

	/* Prepare iov according to kernel capability */
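	/* Without SO_PEEK_OFF, data we already sent but that the guest
	 * hasn't acknowledged yet still sits at the head of the socket
	 * queue: peek it into a discard buffer to skip it.
	 */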
	if (!peek_offset_cap) {
		mh_sock.msg_iov = iov_sock;
		iov_sock[0].iov_base = tcp_buf_discard;
		iov_sock[0].iov_len = already_sent;
		mh_sock.msg_iovlen = fill_bufs + 1;
	} else {
		mh_sock.msg_iov = &iov_sock[1];
		mh_sock.msg_iovlen = fill_bufs;
	}

	if (tcp_payload_used + fill_bufs > TCP_FRAMES_MEM) {
		tcp_payload_flush(c);

		/* Silence Coverity CWE-125 false positive */
		tcp_payload_used = 0;
	}

	for (i = 0, iov = iov_sock + 1; i < fill_bufs; i++, iov++) {
		iov->iov_base = &tcp_payload[tcp_payload_used + i].data;
		iov->iov_len = mss;
	}
	if (iov_rem)
		iov_sock[fill_bufs].iov_len = iov_rem;

	/* Receive into buffers, don't dequeue until acknowledged by guest. */
	do
		len = recvmsg(s, &mh_sock, MSG_PEEK);
	while (len < 0 && errno == EINTR);

	if (len < 0) {
		if (errno != EAGAIN && errno != EWOULDBLOCK) {
			tcp_rst(c, conn);
			return -errno;
		}

		/* On an established connection, EAGAIN with MSG_PEEK can
		 * only mean a non-zero peek offset: we're waiting either
		 * for more data to arrive, or for the guest to ACK part
		 * of it, and both cause further wake-ups. It's thus safe
		 * to set STALLED (and, in turn, EPOLLET) and avoid an
		 * EPOLLIN event loop. While establishing a connection,
		 * though, we can get EAGAIN with a zero peek offset,
		 * hence the check on already_sent.
		 */
		if (already_sent) /* No new data and EAGAIN: set EPOLLET */
			conn_flag(c, conn, STALLED);

		return 0;
	}

	if (!len) {
		if ((conn->events & (SOCK_FIN_RCVD | TAP_FIN_SENT)) == SOCK_FIN_RCVD) {
			int ret = tcp_buf_send_flag(c, conn, FIN | ACK);

			if (ret) {
				tcp_rst(c, conn);
				return ret;
			}

			conn_event(c, conn, TAP_FIN_SENT);
		}

		return 0;
	}

	if (!peek_offset_cap)
		len -= already_sent;

	if (len <= 0) {
		conn_flag(c, conn, STALLED);
		return 0;
	}

	conn_flag(c, conn, ~ACK_FROM_TAP_BLOCKS);
	conn_flag(c, conn, ~STALLED);

	send_bufs = DIV_ROUND_UP(len, mss);
	last_len = len - (send_bufs - 1) * mss;

	/* Likely, some new data was acked too. */
	tcp_update_seqack_wnd(c, conn, false, NULL);

	/* Finally, queue to tap */
	dlen = mss;
	seq = conn->seq_to_tap;
	for (i = 0; i < send_bufs; i++) {
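		/* For a given connection, the IPv4 header checksum only
		 * depends on the segment length: frames in the middle
		 * of a run all carry a full MSS, so the checksum of the
		 * previous queued buffer can be reused. The first frame
		 * may follow one from another connection, and the last
		 * may be shorter, so theirs are computed from scratch.
		 */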
		int no_csum = i && i != send_bufs - 1 && tcp_payload_used;

		if (i == send_bufs - 1)
			dlen = last_len;

		tcp_data_to_tap(c, conn, dlen, no_csum, seq);
		seq += dlen;
	}

	conn_flag(c, conn, ACK_FROM_TAP_DUE);

	return 0;
}