1
0
mirror of https://passt.top/passt synced 2024-12-22 21:55:22 +00:00

flow: Abstract allocation of new flows with helper function

Currently tcp.c open codes the process of allocating a new flow from the
flow table: twice, in fact, once for guest to host and once for host to
guest connections.  This duplication isn't ideal and will get worse as we
add more protocols to the flow table.  It also makes it harder to
experiment with different ways of handling flow table allocation.

Instead, introduce a function to allocate a new flow: flow_alloc().  In
some cases we currently check if we're able to allocate, but delay the
actual allocation.  We now handle that slightly differently with a
flow_alloc_cancel() function to back out a recent allocation.  We have that
separate from a flow_free() function, because future changes we have in
mind will need to handle this case a little differently.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
This commit is contained in:
David Gibson 2024-01-16 11:50:41 +11:00 committed by Stefano Brivio
parent fb7c00169d
commit 4a849e9526
3 changed files with 47 additions and 11 deletions

26
flow.c
View File

@@ -50,6 +50,32 @@ void flow_log_(const struct flow_common *f, int pri, const char *fmt, ...)
logmsg(pri, "Flow %u (%s): %s", flow_idx(f), FLOW_TYPE(f), msg);
}
/**
 * flow_alloc() - Reserve the next free entry in the flow table
 *
 * Return: pointer to an unused flow entry, or NULL if the table is full
 */
union flow *flow_alloc(void)
{
	/* Entries are handed out in order; flow_count is the next free slot */
	return (flow_count < FLOW_MAX) ? &flowtab[flow_count++] : NULL;
}
/**
 * flow_alloc_cancel() - Return a just-allocated flow entry to the table
 * @flow: Flow to deallocate
 *
 * @flow must be the last flow allocated by flow_alloc(); only the most
 * recent allocation can be backed out.
 */
void flow_alloc_cancel(union flow *flow)
{
	ASSERT(FLOW_IDX(flow) == flow_count - 1);

	/* Shrink the table, then scrub the abandoned entry */
	flow_count--;
	memset(flow, 0, sizeof(*flow));
}
/**
* flow_table_compact() - Perform compaction on flow table
* @c: Execution context

View File

@@ -88,4 +88,7 @@ static inline flow_sidx_t flow_sidx(const struct flow_common *f,
*/
#define FLOW_SIDX(f_, side) (flow_sidx(&(f_)->f, (side)))
union flow *flow_alloc(void);
void flow_alloc_cancel(union flow *flow);
#endif /* FLOW_TABLE_H */

29
tcp.c
View File

@@ -1944,17 +1944,18 @@ static void tcp_conn_from_tap(struct ctx *c,
};
const struct sockaddr *sa;
struct tcp_tap_conn *conn;
union flow *flow;
socklen_t sl;
int s, mss;
(void)saddr;
if (flow_count >= FLOW_MAX)
if (!(flow = flow_alloc()))
return;
if ((s = tcp_conn_pool_sock(pool)) < 0)
if ((s = tcp_conn_new_sock(c, af)) < 0)
return;
goto cancel;
if (!c->no_map_gw) {
if (af == AF_INET && IN4_ARE_ADDR_EQUAL(daddr, &c->ip4.gw))
@@ -1969,13 +1970,11 @@ static void tcp_conn_from_tap(struct ctx *c,
.sin6_addr = c->ip6.addr_ll,
.sin6_scope_id = c->ifi6,
};
if (bind(s, (struct sockaddr *)&addr6_ll, sizeof(addr6_ll))) {
close(s);
return;
}
if (bind(s, (struct sockaddr *)&addr6_ll, sizeof(addr6_ll)))
goto cancel;
}
conn = CONN(flow_count++);
conn = &flow->tcp;
conn->f.type = FLOW_TCP;
conn->sock = s;
conn->timer = -1;
@@ -2047,6 +2046,12 @@ static void tcp_conn_from_tap(struct ctx *c,
}
tcp_epoll_ctl(c, conn);
return;
cancel:
if (s >= 0)
close(s);
flow_alloc_cancel(flow);
}
/**
@@ -2724,14 +2729,12 @@ void tcp_listen_handler(struct ctx *c, union epoll_ref ref,
union flow *flow;
int s;
if (c->no_tcp || flow_count >= FLOW_MAX)
if (c->no_tcp || !(flow = flow_alloc()))
return;
s = accept4(ref.fd, (struct sockaddr *)&sa, &sl, SOCK_NONBLOCK);
if (s < 0)
return;
flow = flowtab + flow_count++;
goto cancel;
if (c->mode == MODE_PASTA &&
tcp_splice_conn_from_sock(c, ref.tcp_listen, &flow->tcp_splice,
@@ -2740,6 +2743,10 @@ void tcp_listen_handler(struct ctx *c, union epoll_ref ref,
tcp_tap_conn_from_sock(c, ref.tcp_listen, &flow->tcp, s,
(struct sockaddr *)&sa, now);
return;
cancel:
flow_alloc_cancel(flow);
}
/**