Mirror of https://gitlab.com/libvirt/libvirt.git (synced 2024-12-29 00:55:22 +00:00)
Commit b379fee117
The default memlock limit is 64k, which is not enough to start a single VM. The requirements for one VM are 12k: 8k for the eBPF map and 4k for the eBPF program; even so, creating the eBPF map and program fails with the 64k limit. By testing I figured out that the minimal limit to start a single VM with functional eBPF is 80k, and adding another 12k lets me start one more. This leads to the following calculation: 80k worked as the memlock limit to start a VM with eBPF, which means there is 68k of locked memory that I was not able to attribute to anything. So to get a number for 4096 VMs:

    68 + 12 * 4096 = 49220

Rounding that up gives a memory lock limit of 64M to support 4096 VMs with the default map size, which can hold 64 entries for devices. This should be good enough as a sane default and users can change it if they need to.

Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=1807090

Signed-off-by: Pavel Hrdina <phrdina@redhat.com>
Reviewed-by: Michal Privoznik <mprivozn@redhat.com>
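As a rough illustration of how that formula scales (a hypothetical recalculation, not part of the original commit message): to support 8192 guests instead of 4096, the same per-guest cost gives

    68 + 12 * 8192 = 98372k

which is just over 96M, so rounding up to the next power of two would suggest a limit of 128M.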
47 lines · 1.5 KiB · SYSTEMD
[Unit]
Description=Virtualization lxc daemon
Conflicts=libvirtd.service
Requires=virtlxcd.socket
Requires=virtlxcd-ro.socket
Requires=virtlxcd-admin.socket
Wants=systemd-machined.service
Before=libvirt-guests.service
After=network.target
After=dbus.service
After=apparmor.service
After=local-fs.target
After=remote-fs.target
After=systemd-logind.service
After=systemd-machined.service
Documentation=man:libvirtd(8)
Documentation=https://libvirt.org

[Service]
Type=notify
ExecStart=@sbindir@/virtlxcd --timeout 120
ExecReload=/bin/kill -HUP $MAINPID
KillMode=process
Restart=on-failure
# At least 1 FD per guest, often 2 (eg qemu monitor + qemu agent).
# eg if we want to support 4096 guests, we'll typically need 8192 FDs
# If changing this, also consider virtlogd.service & virtlockd.service
# limits which are also related to number of guests
LimitNOFILE=8192
# The cgroups pids controller can limit the number of tasks started by
# the daemon, which can limit the number of domains for some hypervisors.
# A conservative default of 8 tasks per guest results in a TasksMax of
# 32k to support 4096 guests.
TasksMax=32768
# With cgroups v2 there is no devices controller anymore, we have to use
# eBPF to control access to devices. In order to do that we create a eBPF
# hash MAP which locks memory. The default map size for 64 devices together
# with program takes 12k per guest. After rounding up we will get 64M to
# support 4096 guests.
LimitMEMLOCK=64M

[Install]
WantedBy=multi-user.target
Also=virtlxcd.socket
Also=virtlxcd-ro.socket
Also=virtlxcd-admin.socket
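The commit message notes that users can change the limit if they need to; with systemd this is normally done through a drop-in rather than by editing the packaged unit. The following is a minimal sketch, assuming the unit is installed as virtlxcd.service and reusing the hypothetical 8192-guest sizing from above together with the per-guest figures in the unit's own comments (2 FDs and 8 tasks per guest); the file name and values are illustrative, not part of the original file.

    # /etc/systemd/system/virtlxcd.service.d/limits.conf (hypothetical drop-in)
    [Service]
    # 2 FDs per guest for 8192 guests
    LimitNOFILE=16384
    # 8 tasks per guest for 8192 guests
    TasksMax=65536
    # 68 + 12 * 8192 = 98372k, rounded up to the next power of two
    LimitMEMLOCK=128M

After creating the drop-in, run systemctl daemon-reload and restart the service to apply the new limits.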