project(
  'libvirt', 'c',
  version: '10.10.0',
  license: 'LGPLv2+',
  meson_version: '>= 0.56.0',
  default_options: [
    'buildtype=debugoptimized',
    'b_pie=true',
    'c_std=gnu99',
    'warning_level=2',
  ],
)
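
# Note: fs.copyfile() (added in meson 0.64.0) is the replacement for the
# now-deprecated configure_file(copy: true) idiom; since the minimum meson
# version supported here predates 0.64.0, the 'fs' module is only imported
# when it is actually available and both code paths have to be kept.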
if meson.version().version_compare('>=0.64.0')
  fs = import('fs')
endif

# figure out if we are building from git

git = run_command('test', '-e', '.git', check: false).returncode() == 0

if git and not get_option('no_git')
  run_command('git', 'submodule', 'update', '--init', check: true)
endif


# detect operating system

os_release = run_command('grep', '-E', '^ID(_LIKE)*=', '/etc/os-release', check: false).stdout()
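# For illustration (not exhaustive): /etc/os-release typically carries lines
# such as "ID=fedora" or "ID=ubuntu" together with "ID_LIKE=debian"; only the
# ID and ID_LIKE keys matter for the checks below.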

# prepare build configuration data

conf = configuration_data()

conf.set('_GNU_SOURCE', 1)
conf.set_quoted('abs_top_builddir', meson.project_build_root())
conf.set_quoted('abs_top_srcdir', meson.project_source_root())
conf.set_quoted('PACKAGE', meson.project_name())
conf.set_quoted('PACKAGE_NAME', meson.project_name())
conf.set_quoted('PACKAGE_VERSION', meson.project_version())
conf.set_quoted('VERSION', meson.project_version())

if host_machine.system() == 'windows'
  # For AI_ADDRCONFIG
  conf.set('_WIN32_WINNT', '0x0600') # Win Vista / Server 2008
  conf.set('WINVER', '0x0600') # Win Vista / Server 2008
endif


# set various paths

if get_option('system')
  prefix = '/usr'
  libdir = prefix / 'lib64'
  if run_command('test', '-d', libdir, check: false).returncode() != 0
    libdir = prefix / 'lib'
  endif
  localstatedir = '/var'
  sysconfdir = '/etc'
else
  prefix = get_option('prefix')
  libdir = prefix / get_option('libdir')
  localstatedir = prefix / get_option('localstatedir')
  sysconfdir = prefix / get_option('sysconfdir')
endif

# if --prefix is /usr, don't use /usr/var for localstatedir or /usr/etc for
# sysconfdir as this makes a lot of things break in testing situations
if prefix == '/usr'
  if localstatedir == '/usr/var'
    localstatedir = '/var'
  endif
  if sysconfdir == '/usr/etc'
    sysconfdir = '/etc'
  endif
endif

runstatedir = get_option('runstatedir')
if runstatedir == ''
  runstatedir = localstatedir / 'run'
endif

initconfdir = get_option('initconfdir')
if initconfdir == ''
  if (os_release.contains('alpine') or
      os_release.contains('arch') or
      os_release.contains('gentoo'))
    initconfdir = sysconfdir / 'conf.d'
  # Ubuntu has ID_LIKE=debian
  elif os_release.contains('debian')
    initconfdir = sysconfdir / 'default'
  else
    initconfdir = sysconfdir / 'sysconfig'
  endif
endif

unitdir = get_option('unitdir')
if unitdir == ''
  unitdir = prefix / 'lib' / 'systemd' / 'system'
endif

sysusersdir = get_option('sysusersdir')
if sysusersdir == ''
  sysusersdir = prefix / 'lib' / 'sysusers.d'
endif

bindir = prefix / get_option('bindir')
datadir = prefix / get_option('datadir')
includedir = prefix / get_option('includedir')
infodir = prefix / get_option('infodir')
libexecdir = prefix / get_option('libexecdir')
localedir = prefix / get_option('localedir')
mandir = prefix / get_option('mandir')
sbindir = prefix / get_option('sbindir')
sharedstatedir = prefix / get_option('sharedstatedir')

docdir = get_option('docdir')
if docdir == ''
  docdir = datadir / 'doc' / meson.project_name()
endif

confdir = sysconfdir / meson.project_name()
pkgdatadir = datadir / meson.project_name()

sshconfdir = get_option('sshconfdir')
if sshconfdir == ''
  sshconfdir = sysconfdir / 'ssh' / 'ssh_config.d'
endif


# generate configmake.h header

configmake_conf = configuration_data()
configmake_conf.set_quoted('BINDIR', bindir)
configmake_conf.set_quoted('DATADIR', datadir)
configmake_conf.set_quoted('LIBDIR', libdir)
configmake_conf.set_quoted('LIBEXECDIR', libexecdir)
configmake_conf.set_quoted('LOCALEDIR', localedir)
configmake_conf.set_quoted('LOCALSTATEDIR', localstatedir)
configmake_conf.set_quoted('MANDIR', mandir)
configmake_conf.set_quoted('PKGDATADIR', pkgdatadir)
configmake_conf.set_quoted('PREFIX', prefix)
configmake_conf.set_quoted('RUNSTATEDIR', runstatedir)
configmake_conf.set_quoted('SBINDIR', sbindir)
configmake_conf.set_quoted('SYSCONFDIR', sysconfdir)

configure_file(
  input: 'configmake.h.in',
  output: '@BASENAME@',
  configuration: configmake_conf,
)


# packager options

packager = get_option('packager')
packager_version = get_option('packager_version')

if packager != ''
  conf.set_quoted('PACKAGER', packager)
endif

if packager_version != ''
  conf.set_quoted('PACKAGER_VERSION', packager_version)
endif


# Add RPATH information when building for a non-standard prefix, or
# when explicitly requested to do so

if prefix == '/usr' and not get_option('rpath').enabled()
  libvirt_rpath = ''
else
  libvirt_rpath = libdir
endif


# figure out libvirt version strings

arr_version = meson.project_version().split('.')
libvirt_version_number = 1000000 * arr_version[0].to_int() + 1000 * arr_version[1].to_int() + arr_version[2].to_int()

conf.set('LIBVIRT_VERSION_NUMBER', libvirt_version_number)
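# Worked example: for version '10.10.0' this yields
# 10 * 1000000 + 10 * 1000 + 0 = 10010000.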

# In libtool terminology we need to figure out:
#
# CURRENT
#     The most recent interface number that this library implements.
#
# REVISION
#     The implementation number of the CURRENT interface.
#
# AGE
#     The difference between the newest and oldest interfaces that this
#     library implements.
#
# In other words, the library implements all the interface numbers
# in the range from number `CURRENT - AGE' to `CURRENT'.
#
# Libtool assigns the soname version from `CURRENT - AGE', and we
# don't want that to ever change in libvirt. ie it must always be
# zero, to produce libvirt.so.0.
#
# We would, however, like the libvirt version number reflected
# in the so version'd symlinks, and this is based on AGE.REVISION
# eg libvirt.so.0.AGE.REVISION
#
# The following examples show what libtool will do
#
# Input: 0.9.14 -> libvirt.so.0.9.14
# Input: 1.0.0 -> libvirt.so.0.1000.0
# Input: 2.5.8 -> libvirt.so.0.2005.8
#
# Assuming we ever do want to break the soname version, this can be
# toggled. But seriously, don't ever touch this.

libvirt_so_version = 0
libvirt_age = 1000 * arr_version[0].to_int() + arr_version[1].to_int()
libvirt_revision = arr_version[2].to_int()
libvirt_lib_version = '@0@.@1@.@2@'.format(libvirt_so_version, libvirt_age, libvirt_revision)
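# Worked example: with version '10.10.0' this produces '0.10010.0',
# i.e. the library is installed as libvirt.so.0.10010.0.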


# check compile flags

cc = meson.get_compiler('c')
cc_flags = []

git_werror = get_option('git_werror')
if (git_werror.enabled() or git_werror.auto()) and git and not get_option('werror')
  cc_flags += [ '-Werror' ]
endif

# gcc --help=warnings outputs
ptrdiff_max = cc.sizeof('ptrdiff_t', prefix: '#include <stddef.h>')
size_max = cc.sizeof('size_t', prefix: '#include <stdint.h>')
# Compute max safe object size by checking ptrdiff_t and size_t sizes.
# Ideally we would get PTRDIFF_MAX and SIZE_MAX values but it would
# give us (2147483647L) and we would have to remove the () and the suffix
# in order to convert it to numbers to be able to pick the smaller one.
alloc_max = run_command(
  'python3', '-c',
  'print(min(2**(@0@ * 8 - 1) - 1, 2**(@1@ * 8) - 1))'.format(ptrdiff_max, size_max),
  check: true,
)
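# For illustration: on a typical 64-bit (LP64) host both sizeof values are 8,
# so the command prints min(2**63 - 1, 2**64 - 1) = 9223372036854775807,
# i.e. PTRDIFF_MAX.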

stack_frame_size = 2048

# clang without optimization enlarges stack frames in certain corner cases
if cc.get_id() == 'clang' and get_option('optimization') == '0'
  stack_frame_size = 4096
endif

# sanitizer instrumentation may enlarge stack frames
if get_option('b_sanitize') != 'none'
  stack_frame_size = 32768
endif

# array_bounds=2 check triggers false positive on some GCC
# versions when using sanitizers. Seen on Fedora 34 with
# GCC 11.1.1
array_bounds = get_option('b_sanitize') == 'none' ? 2 : 1

cc_flags += [
  '-fasynchronous-unwind-tables',
  '-fexceptions',
  '-fipa-pure-const',
  '-fno-common',
  '-Wabsolute-value',
  '-Waddress',
  '-Waddress-of-packed-member',
  '-Waggressive-loop-optimizations',
  '-Walloc-size-larger-than=@0@'.format(alloc_max.stdout().strip()),
  '-Walloca',
  '-Warray-bounds=@0@'.format(array_bounds),
  '-Wattribute-alias=2',
  '-Wattribute-warning',
  '-Wattributes',
  '-Wbool-compare',
  '-Wbool-operation',
  '-Wbuiltin-declaration-mismatch',
  '-Wbuiltin-macro-redefined',
  '-Wcannot-profile',
  '-Wcast-align',
  '-Wcast-align=strict',
  # We do "bad" function casts all the time for event callbacks
  '-Wno-cast-function-type',
  '-Wchar-subscripts',
  '-Wclobbered',
  '-Wcomment',
  '-Wcomments',
  '-Wcoverage-mismatch',
  '-Wcpp',
  '-Wdangling-else',
  '-Wdate-time',
  '-Wdeclaration-after-statement',
  '-Wdeprecated-declarations',
  '-Wdesignated-init',
  '-Wdiscarded-array-qualifiers',
  '-Wdiscarded-qualifiers',
  '-Wdiv-by-zero',
  '-Wduplicated-cond',
  '-Wduplicate-decl-specifier',
  '-Wempty-body',
  '-Wendif-labels',
  '-Wexpansion-to-defined',
  '-Wformat-contains-nul',
  '-Wformat-extra-args',
  # -Wformat=2 implies -Wformat-nonliteral so we need to manually exclude it
  '-Wno-format-nonliteral',
  '-Wformat-overflow=2',
  '-Wformat-security',
  # -Wformat enables this by default, and we should keep it,
  # but need to rewrite various areas of code first
  '-Wno-format-truncation',
  '-Wformat-y2k',
  '-Wformat-zero-length',
  '-Wframe-address',
  '-Wframe-larger-than=@0@'.format(stack_frame_size),
  '-Wfree-nonheap-object',
  '-Whsa',
  '-Wif-not-aligned',
  '-Wignored-attributes',
  '-Wignored-qualifiers',
  '-Wimplicit',
  '-Wimplicit-fallthrough=5',
  '-Wimplicit-function-declaration',
  '-Wimplicit-int',
  '-Wincompatible-pointer-types',
  '-Winit-self',
  '-Winline',
  '-Wint-conversion',
  '-Wint-in-bool-context',
  '-Wint-to-pointer-cast',
  '-Winvalid-memory-model',
  '-Winvalid-pch',
  '-Wjump-misses-init',
  '-Wlogical-not-parentheses',
  '-Wlogical-op',
  '-Wmain',
  '-Wmaybe-uninitialized',
  '-Wmemset-elt-size',
  '-Wmemset-transposed-args',
  '-Wmisleading-indentation',
  '-Wmissing-attributes',
  '-Wmissing-braces',
  '-Wmissing-declarations',
  '-Wmissing-field-initializers',
  '-Wmissing-include-dirs',
  '-Wmissing-parameter-type',
  '-Wmissing-profile',
  '-Wmissing-prototypes',
  '-Wmultichar',
  '-Wmultistatement-macros',
  '-Wnarrowing',
  '-Wnested-externs',
  '-Wnonnull',
  '-Wnonnull-compare',
  '-Wnormalized=nfc',
  '-Wnull-dereference',
  '-Wodr',
  '-Wold-style-declaration',
  '-Wold-style-definition',
  '-Wopenmp-simd',
  '-Woverflow',
  '-Woverride-init',
  '-Wpacked-bitfield-compat',
  '-Wpacked-not-aligned',
  '-Wparentheses',
  '-Wpointer-arith',
  '-Wpointer-compare',
  '-Wpointer-sign',
  '-Wpointer-to-int-cast',
  '-Wpragmas',
  '-Wpsabi',
  '-Wrestrict',
  '-Wreturn-local-addr',
  '-Wreturn-type',
  '-Wscalar-storage-order',
  '-Wsequence-point',
  '-Wshadow',
  '-Wshift-count-negative',
  '-Wshift-count-overflow',
  '-Wshift-negative-value',
  '-Wshift-overflow=2',
  # So we have -W enabled, and then have to explicitly turn off...
  '-Wno-sign-compare',
  '-Wsizeof-array-argument',
  '-Wsizeof-pointer-div',
  '-Wsizeof-pointer-memaccess',
  '-Wstrict-aliasing',
  '-Wstrict-prototypes',
  '-Wstringop-overflow=2',
  '-Wstringop-truncation',
  '-Wsuggest-attribute=cold',
  '-Wno-suggest-attribute=const',
  '-Wsuggest-attribute=format',
  '-Wsuggest-attribute=noreturn',
  '-Wno-suggest-attribute=pure',
  '-Wsuggest-final-methods',
  '-Wsuggest-final-types',
  '-Wswitch',
  '-Wswitch-bool',
  '-Wswitch-enum',
  '-Wswitch-unreachable',
  '-Wsync-nand',
  '-Wtautological-compare',
  '-Wtrampolines',
  '-Wtrigraphs',
  '-Wtype-limits',
  # Clang incorrectly complains about duplicate typedefs in gnu99 mode,
  # so use this Clang-specific arg to keep it quiet
  '-Wno-typedef-redefinition',
  '-Wuninitialized',
  '-Wunknown-pragmas',
  '-Wunused',
  '-Wunused-but-set-parameter',
  '-Wunused-but-set-variable',
  '-Wunused-const-variable=2',
  '-Wunused-function',
  '-Wunused-label',
  '-Wunused-local-typedefs',
  '-Wunused-parameter',
  '-Wunused-result',
  '-Wunused-value',
  '-Wunused-variable',
  '-Wvarargs',
  '-Wvariadic-macros',
  '-Wvector-operation-performance',
  '-Wvla',
  '-Wvolatile-register-var',
  '-Wwrite-strings',
]

if cc.get_id() == 'clang'
  # Stop CLang from doing inter-procedural analysis of calls
  # between functions in the same compilation unit. Such an
  # optimization has been known to break the test suite by
  # making assumptions that a return value is a constant.
  # This makes it impossible to mock certain functions with
  # replacement definitions via LD_PRELOAD that have different
  # semantics.
  #
  # This is a bit of a big hammer, but alternatives don't work:
  #
  # - 'weak' attribute - weak symbols get dropped
  #   when the .a libs are combined into the .so
  #   see commit 407a281a8e2b6c5078ba1148535663ea64fd9314
  #
  # - 'noipa' attribute - only available with GCC currently
  #   https://reviews.llvm.org/D101011
  cc_flags += [ '-fsemantic-interposition' ]
endif

if get_option('b_sanitize') != 'none'
  # This is needed because of xdrproc_t. It's declared as a pointer to a
  # function with variable arguments. But for catching type related problems at
  # compile time, our rpcgen generates functions with proper types, say:
  #
  #   bool_t xdr_TestEnum(XDR *, TestEnum *);
  #
  # But passing xdr_TestEnum as a callback where xdrproc_t type is expected is
  # undefined behavior. Yet, we want the comfort of compile time checks, so
  # just disable the sanitizer warning for now. It's a big hammer though.
  cc_flags += [ '-fno-sanitize=function' ]
endif

supported_cc_flags = []
if get_option('warning_level') == '2'
  supported_cc_flags = cc.get_supported_arguments(cc_flags)
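  # Note: get_supported_arguments() keeps only the flags this particular
  # compiler accepts, so GCC-only warnings are dropped when building with
  # clang (and vice versa).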

  # we prefer -fstack-protector-strong but fallback to -fstack-protector-all
  fstack_cflags = cc.first_supported_argument([
    '-fstack-protector-strong',
    '-fstack-protector-all',
  ])
  supported_cc_flags += fstack_cflags

  # When building with mingw, using -fstack-protector requires the libssp
  # library, which is pulled in by also passing -fstack-protector to the linker.
  if fstack_cflags.length() == 1 and host_machine.system() == 'windows'
    add_project_link_arguments(fstack_cflags, language: 'c')
  endif

  if supported_cc_flags.contains('-Wlogical-op')
    # Broken in 6.0 and later
    # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69602
    w_logical_op_args = [ '-O2', '-Wlogical-op', '-Werror' ]
    w_logical_op_code = '''
      #define TEST1 1
      #define TEST2 TEST1

      int main(void) {
        int test = 0;
        return test == TEST1 || test == TEST2;
      }
    '''
    if not cc.compiles(w_logical_op_code, args: w_logical_op_args)
      conf.set('BROKEN_GCC_WLOGICALOP_EQUAL_EXPR', 1)
    endif
  endif

  # Check whether clang gives bogus warning for -Wdouble-promotion.
  w_double_promotion_args = [ '-O2', '-Wdouble-promotion', '-Werror' ]
  w_double_promotion_code = '''
    #include <math.h>

    int main(void) {
      float f = 0.0;
      return isnan(f);
    }
  '''
  if cc.compiles(w_double_promotion_code, args: w_double_promotion_args, name: '-Wdouble-promotion')
    supported_cc_flags += [ '-Wdouble-promotion' ]
  endif

  # Clang complains about unused static inline functions which are common
  # with G_DEFINE_AUTOPTR_CLEANUP_FUNC.
  w_unused_function_args = [ '-Wunused-function', '-Werror' ]
  w_unused_function_code = '''
    static inline void foo(void) {}

    int main(void) { return 0; }
  '''
  # -Wunused-function is implied by -Wall, we must turn it off explicitly.
  if not cc.compiles(w_unused_function_code, args: w_unused_function_args)
    supported_cc_flags += [ '-Wno-unused-function' ]
  endif
endif
add_project_arguments(supported_cc_flags, language: 'c')

if cc.has_argument('-Wsuggest-attribute=format')
  conf.set('WITH_SUGGEST_ATTRIBUTE_FORMAT', 1)
endif

# used in tests
cc_flags_relaxed_frame_limit = []
if cc.has_argument('-Wframe-larger-than=262144')
  cc_flags_relaxed_frame_limit += [
    '-Wframe-larger-than=262144',
  ]
endif


# various linker checks

libvirt_relro = cc.get_supported_link_arguments([
  '-Wl,-z,relro',
  '-Wl,-z,now',
])

libvirt_nodelete = cc.get_supported_link_arguments([
  '-Wl,-z,nodelete',
])

libvirt_no_undefined = []
if get_option('b_sanitize') == 'none'
  libvirt_no_undefined += cc.get_supported_link_arguments([
    '-Wl,-z,defs',
  ])
endif

libvirt_no_indirect = cc.get_supported_link_arguments([
  '-Wl,--no-copy-dt-needed-entries',
])

libvirt_no_warn_duplicate_libraries = cc.get_supported_link_arguments([
  '-Wl,-no_warn_duplicate_libraries',
])

if host_machine.system() == 'windows'
  version_script_flags = '-Wl,'
elif host_machine.system() == 'darwin'
  # macOS libraries don't support symbol versioning
  version_script_flags = ''
else
  version_script_flags = '-Wl,--version-script='
endif

libvirt_flat_namespace = []
if host_machine.system() == 'darwin'
  libvirt_flat_namespace = '-Wl,-flat_namespace'
endif

libvirt_export_dynamic = cc.first_supported_link_argument([
  '-Wl,-export-dynamic',
  '-Wl,-export_dynamic',
])


# check availability of various common functions (non-fatal if missing)

functions = [
  'elf_aux_info',
  'explicit_bzero',
  'fallocate',
  'getauxval',
  'getegid',
  'geteuid',
  'getgid',
  'getifaddrs',
  'getmntent_r',
  'getpwuid_r',
  'getrlimit',
  'getuid',
  'getutxid',
  'if_indextoname',
  'mmap',
  'newlocale',
  'pipe2',
  'posix_fallocate',
  'posix_memalign',
  'prlimit',
  'sched_get_priority_min',
  'sched_getaffinity',
  'sched_setscheduler',
  'setgroups',
  'setrlimit',
  'symlink',
  'sysctlbyname',
]

stat_functions = [
  '__lxstat',
  '__lxstat64',
  '__xstat',
  '__xstat64',
  'lstat',
  'lstat64',
  'stat',
  'stat64',
]

functions += stat_functions

open_functions = [
  '__open_2',
]

functions += open_functions

foreach function : functions
  if cc.has_function(function)
    conf.set('WITH_@0@'.format(function.to_upper()), 1)
  endif
endforeach
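# For example, if cc.has_function('getuid') succeeds this defines WITH_GETUID;
# the macro naming follows the format string above.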

foreach function : stat_functions
  if cc.has_header_symbol('sys/stat.h', function)
    conf.set('WITH_@0@_DECL'.format(function.to_upper()), 1)
  endif
endforeach

foreach function : open_functions
  if cc.has_header_symbol('fcntl.h', function)
    conf.set('WITH_@0@_DECL'.format(function.to_upper()), 1)
  endif
endforeach


# various header checks

headers = [
  'asm/hwcap.h',
  'ifaddrs.h',
  'libtasn1.h',
  'linux/kvm.h',
  'mntent.h',
  'net/ethernet.h',
  'net/if.h',
  'pty.h',
  'pwd.h',
  'sched.h',
  'sys/auxv.h',
  'sys/ioctl.h',
  'sys/mman.h',
  'sys/mount.h',
  'sys/syscall.h',
  'sys/ucred.h',
  'syslog.h',
  'util.h',
  'xlocale.h',
]

if host_machine.system() == 'freebsd'
  headers += 'libutil.h'
endif

foreach name : headers
  if cc.check_header(name)
    conf.set('WITH_@0@'.format(name.underscorify().to_upper()), 1)
  endif
endforeach

# check for kernel header required by src/util/virnetdevbridge.c
if host_machine.system() == 'linux'
  if not cc.check_header('linux/sockios.h')
    error('You must install kernel-headers in order to compile libvirt with QEMU or LXC support')
  endif
endif


# check various symbols

symbols = [
  # Check whether endian provides handy macros.
  [ 'endian.h', 'htole64' ],

  [ 'unistd.h', 'SEEK_HOLE' ],

  # Check for BSD approach for setting MAC addr
  [ 'net/if_dl.h', 'link_addr', '#include <sys/types.h>\n#include <sys/socket.h>' ],
]

if host_machine.system() == 'linux'
  symbols += [
    # process management
    [ 'sys/syscall.h', 'SYS_pidfd_open' ],
    # vsock
    [ 'linux/vm_sockets.h', 'struct sockaddr_vm', '#include <sys/socket.h>' ],
  ]
endif

foreach symbol : symbols
  if cc.has_header_symbol(symbol[0], symbol[1], args: '-D_GNU_SOURCE', prefix: symbol.get(2, ''))
    conf.set('WITH_DECL_@0@'.format(symbol[1].underscorify().to_upper()), 1)
  endif
endforeach
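# For example, finding SEEK_HOLE in unistd.h defines WITH_DECL_SEEK_HOLE.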

# Check for BSD approach for bridge management
brd_required_headers = '''#include <stdint.h>
#include <net/if.h>
#include <net/ethernet.h>'''
if (cc.has_header_symbol('net/if_bridgevar.h', 'BRDGSFD', prefix: brd_required_headers) and
    cc.has_header_symbol('net/if_bridgevar.h', 'BRDGADD', prefix: brd_required_headers) and
    cc.has_header_symbol('net/if_bridgevar.h', 'BRDGDEL', prefix: brd_required_headers))
  conf.set('WITH_BSD_BRIDGE_MGMT', 1)
endif

# Check for BSD CPU affinity availability
if cc.has_header_symbol('sys/cpuset.h', 'cpuset_getaffinity', prefix: '#include <sys/param.h>')
  conf.set('WITH_BSD_CPU_AFFINITY', 1)
endif

# whether Mach clock routines are available
if (cc.has_header_symbol('mach/clock.h', 'clock_serv_t') and
    cc.has_header_symbol('mach/clock.h', 'host_get_clock_service') and
    cc.has_header_symbol('mach/clock.h', 'clock_get_time'))
  conf.set('WITH_MACH_CLOCK_ROUTINES', 1)
endif


# check various types

types = [
  [ 'struct ifreq', '#include <sys/socket.h>\n#include <net/if.h>' ],
  [ 'struct sockpeercred', '#include <sys/socket.h>' ],
]

foreach type : types
  if cc.has_type(type[0], prefix: type[1])
    name = type[0].underscorify().to_upper()
    conf.set('WITH_@0@'.format(name), 1)
  endif
endforeach

if host_machine.system() == 'windows'
  uid_types = [
    'uid_t',
    'gid_t',
  ]
  foreach type : uid_types
    if not cc.has_type(type, prefix: '#include <sys/types.h>')
      conf.set(type, 'int')
    endif
  endforeach
endif


# check various members

members = [
  # Check for Linux vs. BSD ifreq members
  [ 'struct ifreq', 'ifr_newname', '#include <sys/socket.h>\n#include <net/if.h>' ],
  [ 'struct ifreq', 'ifr_ifindex', '#include <sys/socket.h>\n#include <net/if.h>' ],
  [ 'struct ifreq', 'ifr_index', '#include <sys/socket.h>\n#include <net/if.h>' ],
  [ 'struct ifreq', 'ifr_hwaddr', '#include <sys/socket.h>\n#include <net/if.h>' ],
]

foreach member : members
  if cc.has_member(member[0], member[1], prefix: member[2])
    type = member[0].underscorify().to_upper()
    member = member[1].underscorify().to_upper()
    conf.set('WITH_@0@_@1@'.format(type, member), 1)
  endif
endforeach


# check various types sizeof

conf.set('SIZEOF_LONG', cc.sizeof('long'))


# Where we look for daemons and admin binaries during configure

libvirt_sbin_path = []

if host_machine.system() != 'windows'
  libvirt_sbin_path += [
    '/sbin',
    '/usr/sbin',
    '/usr/local/sbin',
  ]
endif


# required programs check

required_programs = [
  'perl',
  'python3',
  'xmllint',
  'xsltproc',
]

if host_machine.system() == 'freebsd'
  required_programs += 'ifconfig'
endif

foreach name : required_programs
  prog = find_program(name, dirs: libvirt_sbin_path)
  varname = name.underscorify()
  conf.set_quoted(varname.to_upper(), prog.full_path())
  set_variable('@0@_prog'.format(varname), prog)
endforeach
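# For example, the 'xmllint' entry ends up as a quoted XMLLINT path in conf
# and as an xmllint_prog variable usable by other meson.build files.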
|
|
|
|
|
|
|
|
# optional programs
|
|
|
|
|
2024-02-19 09:23:27 +00:00
|
|
|
optional_test_programs = [
|
2020-06-30 17:53:09 +00:00
|
|
|
'augparse',
|
2023-02-01 16:19:16 +00:00
|
|
|
'black',
|
2024-02-19 09:23:27 +00:00
|
|
|
'flake8',
|
|
|
|
'pdwtags',
|
|
|
|
'pytest',
|
|
|
|
]
|
|
|
|
|
|
|
|
optional_programs = [
|
2020-06-30 17:53:09 +00:00
|
|
|
'dmidecode',
|
|
|
|
'ip',
|
2020-06-24 10:17:39 +00:00
|
|
|
'iscsiadm',
|
2020-06-30 17:53:09 +00:00
|
|
|
'mdevctl',
|
|
|
|
'mm-ctl',
|
|
|
|
'modprobe',
|
|
|
|
'ovs-vsctl',
|
|
|
|
'rmmod',
|
|
|
|
'tc',
|
2024-02-19 09:23:27 +00:00
|
|
|
] + optional_test_programs
|
2020-06-30 17:53:09 +00:00
|
|
|
|
2024-02-19 09:23:35 +00:00
|
|
|
missing_optional_programs = []
|
2020-06-30 17:53:09 +00:00
|
|
|
foreach name : optional_programs
|
|
|
|
prog = find_program(name, required: false, dirs: libvirt_sbin_path)
|
|
|
|
varname = name.underscorify()
|
|
|
|
if prog.found()
|
2022-10-07 07:43:33 +00:00
|
|
|
prog_path = prog.full_path()
|
2020-06-30 17:53:09 +00:00
|
|
|
else
|
|
|
|
prog_path = name
|
2024-02-19 09:23:35 +00:00
|
|
|
if name in optional_test_programs
|
|
|
|
missing_optional_programs += [ name ]
|
|
|
|
endif
|
2020-06-30 17:53:09 +00:00
|
|
|
endif
|
|
|
|
|
|
|
|
conf.set_quoted(varname.to_upper(), prog_path)
|
|
|
|
set_variable('@0@_prog'.format(varname), prog)
|
|
|
|
endforeach
|
|
|
|
|
|
|
|
|
2021-07-07 11:00:12 +00:00
|
|
|
# early checks where lot of other packages depend on the result
|
|
|
|
|
|
|
|
if not get_option('driver_remote').disabled()
|
2021-12-08 08:32:55 +00:00
|
|
|
# On MinGW portablexdr provides XDR functions, on linux they are
|
|
|
|
# provided by libtirpc and on FreeBSD/macOS there is no need to
|
|
|
|
# use extra library as it's provided by libc directly.
|
|
|
|
if host_machine.system() == 'windows'
|
|
|
|
xdr_dep = cc.find_library('portablexdr', required: get_option('driver_remote'))
|
2023-10-09 21:17:10 +00:00
|
|
|
elif host_machine.system() in [ 'linux', 'gnu' ]
|
2021-12-08 08:32:55 +00:00
|
|
|
xdr_dep = dependency('libtirpc', required: get_option('driver_remote'))
|
|
|
|
elif host_machine.system() in [ 'freebsd', 'darwin' ]
|
|
|
|
xdr_dep = cc.find_library('c', required: get_option('driver_remote'))
|
|
|
|
else
|
|
|
|
xdr_dep = dependency('', required: false)
|
2021-07-07 11:00:12 +00:00
|
|
|
endif
|
|
|
|
|
2021-12-08 08:32:55 +00:00
|
|
|
if xdr_dep.found()
|
2021-07-07 11:00:12 +00:00
|
|
|
conf.set('WITH_REMOTE', 1)
|
2021-12-08 08:32:55 +00:00
|
|
|
elif get_option('driver_remote').enabled()
|
|
|
|
error('XDR is required for remote driver')
|
2021-07-07 11:00:12 +00:00
|
|
|
endif
|
2021-12-08 08:32:55 +00:00
|
|
|
else
|
|
|
|
xdr_dep = dependency('', required: false)
|
2021-07-07 11:00:12 +00:00
|
|
|
endif
|
|
|
|
|
|
|
|
|
2020-03-02 14:14:14 +00:00
|
|
|
# generic build dependencies
|
|
|
|
|
2023-03-23 08:12:52 +00:00
|
|
|
acl_dep = dependency('libacl', required: false)
|
2021-05-26 16:42:40 +00:00
|
|
|
if acl_dep.found()
|
|
|
|
conf.set('WITH_LIBACL', 1)
|
2020-03-02 14:14:14 +00:00
|
|
|
endif
|
|
|
|
|
2020-07-29 12:19:59 +00:00
|
|
|
apparmor_dep = dependency('libapparmor', required: get_option('apparmor'))
|
|
|
|
if apparmor_dep.found()
|
|
|
|
conf.set('WITH_APPARMOR', 1)
|
2023-06-29 09:25:12 +00:00
|
|
|
if apparmor_dep.version().version_compare('>=3.0.0')
|
|
|
|
conf.set('WITH_APPARMOR_3', 1)
|
|
|
|
endif
|
2020-08-18 23:39:19 +00:00
|
|
|
conf.set_quoted('APPARMOR_DIR', sysconfdir / 'apparmor.d')
|
2020-07-29 12:19:59 +00:00
|
|
|
conf.set_quoted('APPARMOR_PROFILES_PATH', '/sys/kernel/security/apparmor/profiles')
|
|
|
|
endif
|
|
|
|
|
2021-05-27 13:20:24 +00:00
|
|
|
if not get_option('apparmor_profiles').disabled()
|
|
|
|
apparmor_profiles_enable = true
|
|
|
|
|
2021-05-27 13:17:19 +00:00
|
|
|
if not conf.has('WITH_APPARMOR')
|
2021-05-27 13:20:24 +00:00
|
|
|
apparmor_profiles_enable = false
|
|
|
|
if get_option('apparmor_profiles').enabled()
|
|
|
|
error('Cannot enable apparmor_profiles without apparmor')
|
|
|
|
endif
|
|
|
|
endif
|
|
|
|
|
|
|
|
if apparmor_profiles_enable
|
|
|
|
conf.set('WITH_APPARMOR_PROFILES', 1)
|
2021-05-27 13:17:19 +00:00
|
|
|
endif
|
|
|
|
endif
|
|
|
|
|
2023-07-06 17:35:36 +00:00
|
|
|
# FIXME rewrite to use dependency() once we can use 2.4.48
|
|
|
|
attr_dep = cc.find_library('attr', required: get_option('attr'))
|
2020-07-29 12:20:15 +00:00
|
|
|
if attr_dep.found()
|
2020-09-01 11:27:44 +00:00
|
|
|
conf.set('WITH_LIBATTR', 1)
|
2020-07-29 12:20:15 +00:00
|
|
|
endif
|
|
|
|
|
2021-05-26 16:46:20 +00:00
|
|
|
audit_dep = dependency('audit', required: get_option('audit'))
|
2020-07-29 12:20:29 +00:00
|
|
|
if audit_dep.found()
|
|
|
|
conf.set('WITH_AUDIT', 1)
|
|
|
|
endif
|
|
|
|
|
2020-06-24 11:24:53 +00:00
|
|
|
bash_completion_version = '2.0'
|
|
|
|
bash_completion_dep = dependency('bash-completion', version: '>=' + bash_completion_version, required: get_option('bash_completion'))
|
|
|
|
|
2020-06-24 11:25:04 +00:00
|
|
|
blkid_version = '2.17'
|
|
|
|
blkid_dep = dependency('blkid', version: '>=' + blkid_version, required: get_option('blkid'))
|
|
|
|
if blkid_dep.found()
|
|
|
|
conf.set('WITH_BLKID', 1)
|
|
|
|
endif
|
|
|
|
|
2024-09-11 12:36:40 +00:00
|
|
|
libbsd_dep = dependency('libbsd', required: false)
|
|
|
|
if libbsd_dep.found()
|
|
|
|
conf.set('WITH_LIBBSD', 1)
|
|
|
|
endif
|
|
|
|
|
2021-05-26 16:46:20 +00:00
|
|
|
capng_dep = dependency('libcap-ng', required: get_option('capng'))
|
2020-06-24 11:25:16 +00:00
|
|
|
if capng_dep.found()
|
|
|
|
conf.set('WITH_CAPNG', 1)
|
|
|
|
endif
|
|
|
|
|
2021-02-17 05:58:59 +00:00
|
|
|
curl_version = '7.19.1'
|
2020-06-24 11:25:26 +00:00
|
|
|
curl_dep = dependency('libcurl', version: '>=' + curl_version, required: get_option('curl'))
|
|
|
|
if curl_dep.found()
|
|
|
|
conf.set('WITH_CURL', 1)
|
|
|
|
endif
|
|
|
|
|
2020-04-29 08:32:28 +00:00
|
|
|
devmapper_version = '1.0.0'
|
|
|
|
devmapper_dep = dependency('devmapper', version: '>=' + devmapper_version, required: false)
|
|
|
|
if devmapper_dep.found()
|
|
|
|
conf.set('WITH_DEVMAPPER', 1)
|
|
|
|
endif
|
|
|
|
|
2020-04-29 08:34:31 +00:00
|
|
|
dlopen_use = host_machine.system() != 'windows'
|
|
|
|
dlopen_dep = cc.find_library('dl', required: dlopen_use)
|
|
|
|
if dlopen_dep.found()
  if not cc.check_header('dlfcn.h')
    error('Unable to find dlfcn.h')
  endif
  conf.set('WITH_DLFCN_H', 1)
endif

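# Prefer FUSE 3 and fall back to FUSE 2 if it is not found; WITH_FUSE records
# the major version that was detected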
fuse_version = '3.1.0'
fuse_dep = dependency('fuse3', version: '>=' + fuse_version, required: false)
if fuse_dep.found()
  conf.set('WITH_FUSE', 3)
else
  fuse_version = '2.8.6'
  fuse_dep = dependency('fuse', version: '>=' + fuse_version, required: get_option('fuse'))
  if fuse_dep.found()
    conf.set('WITH_FUSE', 1)
  endif
endif

glib_version = '2.58.0'
glib_dep = dependency('glib-2.0', version: '>=' + glib_version)
gobject_dep = dependency('gobject-2.0', version: '>=' + glib_version)
if host_machine.system() == 'windows'
  gio_dep = dependency('gio-2.0', version: '>=' + glib_version)
else
  gio_dep = dependency('gio-unix-2.0', version: '>=' + glib_version)
endif
glib_dep = declare_dependency(
  dependencies: [ glib_dep, gobject_dep, gio_dep ],
)

glib_version_arr = glib_version.split('.')
glib_version_str = 'GLIB_VERSION_@0@_@1@'.format(glib_version_arr[0], glib_version_arr[1])
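# e.g. a glib_version of '2.58.0' yields 'GLIB_VERSION_2_58'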
# Ask for warnings about anything that was marked deprecated in the defined
# version, or earlier: such usage is a candidate for rewrite.
conf.set('GLIB_VERSION_MIN_REQUIRED', glib_version_str)
# Ask for warnings if code tries to use a function that did not exist in the
# defined version, since such uses risk breaking the build against older glib.
conf.set('GLIB_VERSION_MAX_ALLOWED', glib_version_str)

glusterfs_version = '3.4.1'
glusterfs_dep = dependency('glusterfs-api', version: '>=' + glusterfs_version, required: get_option('glusterfs'))

gnutls_version = '3.6.0'
gnutls_dep = dependency('gnutls', version: '>=' + gnutls_version)

json_c_version = '0.14'
json_c_dep = dependency('json-c', version: '>=' + json_c_version, required: get_option('json_c'))
if json_c_dep.found()
  conf.set('WITH_JSON_C', 1)
  conf.set('WITH_JSON', 1)
endif

# Check for BSD kvm (kernel memory interface)
if host_machine.system() == 'freebsd'
  libkvm_dep = cc.find_library('kvm')
else
  libkvm_dep = dependency('', required: false)
endif

libiscsi_version = '1.18.0'
libiscsi_dep = dependency('libiscsi', version: '>=' + libiscsi_version, required: get_option('libiscsi'))

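# nbdkit support needs both the pidfd_open(2) syscall declaration and libnbd;
# an explicit -Dnbdkit=enabled request fails hard if either is missing,
# otherwise support is quietly left out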
if not get_option('nbdkit').disabled()
  libnbd_version = '1.0'
  libnbd_dep = dependency('libnbd', version: '>=' + libnbd_version, required: false)

  nbdkit_requested = get_option('nbdkit').enabled()
  nbdkit_syscall_ok = conf.has('WITH_DECL_SYS_PIDFD_OPEN')
  nbdkit_libnbd_ok = libnbd_dep.found()

  if not nbdkit_syscall_ok and nbdkit_requested
    error('nbdkit support requires pidfd_open(2)')
  endif
  if not nbdkit_libnbd_ok and nbdkit_requested
    error('nbdkit support requires libnbd')
  endif

  if nbdkit_syscall_ok and nbdkit_libnbd_ok
    conf.set('WITH_NBDKIT', 1)
  endif
endif
if not conf.has('WITH_NBDKIT')
  libnbd_dep = dependency('', required: false)
endif

# Default value for the storage_use_nbdkit config option. For now 'auto' just
# maps to disabled, but in the future it may depend on which security drivers
# are enabled.
use_nbdkit_default = get_option('nbdkit_config_default').enabled()

if use_nbdkit_default and not conf.has('WITH_NBDKIT')
  error('nbdkit_config_default requires nbdkit to be enabled')
endif
conf.set10('USE_NBDKIT_DEFAULT', use_nbdkit_default)

libnl_version = '3.0'
if not get_option('libnl').disabled() and host_machine.system() == 'linux'
  libnl_dep = dependency('libnl-3.0', version: '>=' + libnl_version, required: get_option('libnl'))
  libnl_route_dep = dependency('libnl-route-3.0', version: '>=' + libnl_version, required: get_option('libnl'))

  if libnl_dep.found() and libnl_route_dep.found()
    libnl_dep = declare_dependency(
      dependencies: [ libnl_dep, libnl_route_dep ],
    )
    conf.set('WITH_LIBNL', 1)
  endif
elif get_option('libnl').enabled()
  error('libnl can be enabled only on linux')
else
  libnl_dep = dependency('', required: false)
endif

libparted_version = '1.8.0'
libparted_dep = dependency('libparted', version: '>=' + libparted_version, required: get_option('storage_disk'))

libpcap_version = '1.5.0'
if not get_option('libpcap').disabled()
  libpcap_dep = dependency('pcap', version: '>=' + libpcap_version, required: get_option('libpcap'))
  if libpcap_dep.found()
    conf.set('WITH_LIBPCAP', 1)
  endif
else
  libpcap_dep = dependency('', required: false)
endif

libssh_version = '0.8.1'
if conf.has('WITH_REMOTE')
  libssh_dep = dependency('libssh', version: '>=' + libssh_version, required: get_option('libssh'))
  if libssh_dep.found()
    conf.set('WITH_LIBSSH', 1)
    if cc.has_function('ssh_channel_get_exit_state', dependencies: libssh_dep)
      conf.set('WITH_SSH_CHANNEL_GET_EXIT_STATE', 1)
    endif
  endif
else
  libssh_dep = dependency('', required: false)
endif

libssh2_version = '1.3'
if conf.has('WITH_REMOTE')
  libssh2_dep = dependency('libssh2', version: '>=' + libssh2_version, required: get_option('libssh2'))
  if libssh2_dep.found()
    conf.set('WITH_SSH2', 1)
  endif
else
  libssh2_dep = dependency('', required: false)
endif

libxml_version = '2.9.1'
libxml_dep = dependency('libxml-2.0', version: '>=' + libxml_version)

libm_dep = cc.find_library('m')

netcf_version = '0.1.8'
if not get_option('netcf').disabled()
  netcf_dep = dependency('netcf', version: '>=' + netcf_version, required: get_option('netcf'))
  if netcf_dep.found()
    conf.set('WITH_NETCF', 1)
  endif
else
  netcf_dep = dependency('', required: false)
endif

have_gnu_gettext_tools = false
if not get_option('nls').disabled()
  have_gettext = cc.has_function('gettext')
  if not have_gettext
    intl_dep = cc.find_library('intl', required: false)
    have_gettext = intl_dep.found()
  else
    intl_dep = dependency('', required: false)
  endif
  if not have_gettext and get_option('nls').enabled()
    error('gettext() is required to build libvirt')
  endif

  if cc.check_header('libintl.h')
    conf.set('WITH_LIBINTL_H', 1)
  elif get_option('nls').enabled()
    error('libintl.h is required to build libvirt')
  endif

  gettext_progs = [
    'xgettext',
    'msgfmt',
    'msgmerge',
  ]
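  # find_program() results are exposed as xgettext_prog, msgfmt_prog and
  # msgmerge_prog via set_variable() in the loop below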
  foreach name : gettext_progs
    prog = find_program(name, required: false)
    set_variable('@0@_prog'.format(name), prog)
  endforeach

  if xgettext_prog.found() and msgfmt_prog.found() and msgmerge_prog.found()
    rc = run_command(msgfmt_prog, '--version', check: false)
    if rc.returncode() == 0 and rc.stdout().contains('GNU gettext')
      have_gnu_gettext_tools = true
    endif
  endif
else
  intl_dep = dependency('', required: false)
endif

numactl_dep = dependency('numa', required: get_option('numactl'))
if numactl_dep.found()
  conf.set('WITH_NUMACTL', 1)
  if cc.has_function('numa_set_preferred_many', dependencies: numactl_dep)
    conf.set('WITH_NUMACTL_SET_PREFERRED_MANY', 1)
  endif
endif

openwsman_version = '2.6.3'
openwsman_dep = dependency('openwsman', version: '>=' + openwsman_version, required: get_option('openwsman'))

parallels_sdk_version = '7.0.22'
parallels_sdk_dep = dependency('parallels-sdk', version: '>=' + parallels_sdk_version, required: false)

pciaccess_version = '0.10.0'
pciaccess_dep = dependency('pciaccess', version: '>=' + pciaccess_version, required: get_option('pciaccess'))

rbd_dep = cc.find_library('rbd', required: get_option('storage_rbd'))
rados_dep = cc.find_library('rados', required: get_option('storage_rbd'))
if rbd_dep.found() and not cc.has_function('rbd_get_features', dependencies: rbd_dep)
  rbd_dep = dependency('', required: false)
endif
if rbd_dep.found() and rados_dep.found()
  if cc.has_function('rbd_list2', dependencies: rbd_dep)
    conf.set('WITH_RBD_LIST2', 1)
  endif

  rbd_dep = declare_dependency(dependencies: [ rbd_dep, rados_dep ])
else
  rbd_dep = dependency('', required: false)
endif

# readline 7.0 is the first version which includes pkg-config support
readline_version = '7.0'
if not get_option('readline').disabled()
  readline_dep = dependency('readline', version: '>=' + readline_version, required: false)
  if not readline_dep.found()
    readline_dep = cc.find_library('readline', required: get_option('readline'))

    if readline_dep.found()
      # This variable is present in all reasonable (5.0+) readline versions;
      # however, the macOS base system contains a library called libedit which
      # takes over the readline name despite lacking many of its features. We
      # want to make sure we only enable readline support when linking against
      # the actual readline library, and the availability of this specific
      # variable is as good a witness for that fact as any.
      correct_rl = cc.has_header_symbol('readline/readline.h', 'rl_completion_quote_character', prefix: '#include <stdio.h>')
      if not correct_rl
        if get_option('readline').enabled()
          error('readline is missing rl_completion_quote_character')
        else
          readline_dep = dependency('', required: false)
        endif
      endif
    endif
  endif
  if readline_dep.found()
    # We need this to avoid compilation issues with modern compilers.
    # See 9ea3424a178 for a more detailed explanation
    readline_dep = declare_dependency(
      compile_args: [ '-D_FUNCTION_DEF' ],
      dependencies: [ readline_dep ],
    )

    conf.set('WITH_READLINE', 1)
  endif
else
  readline_dep = dependency('', required: false)
endif

if not get_option('sanlock').disabled()
  sanlock_dep = dependency('libsanlock_client', required: get_option('sanlock'))

  if sanlock_dep.found()
    conf.set('WITH_SANLOCK', 1)
    # check for sanlock_strerror introduced in sanlock-3.5.0
    if cc.has_function('sanlock_strerror', dependencies: sanlock_dep)
      conf.set('WITH_SANLOCK_STRERROR', 1)
    endif
  endif
else
  sanlock_dep = dependency('', required: false)
endif

sasl_version = '2.1.26'
if conf.has('WITH_REMOTE')
  sasl_dep = dependency('libsasl2', version: '>=' + sasl_version, required: get_option('sasl'))
  if sasl_dep.found()
    conf.set('WITH_SASL', 1)
  endif
else
  sasl_dep = dependency('', required: false)
endif

selinux_dep = dependency('libselinux', required: get_option('selinux'))
if selinux_dep.found()
  selinux_mount = get_option('selinux_mount')
  if selinux_mount == ''
    if run_command('test', '-d', '/sys/fs/selinux', check: false).returncode() == 0
      selinux_mount = '/sys/fs/selinux'
    else
      selinux_mount = '/selinux'
    endif
  endif
  conf.set_quoted('SELINUX_MOUNT', selinux_mount)
  conf.set('WITH_SELINUX', 1)
endif

thread_dep = dependency('threads')
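# If pthread_sigmask is provided as a macro, make sure a function pointer to
# it can still be taken; when the snippet below fails to compile, the
# implementation is flagged as broken via FUNC_PTHREAD_SIGMASK_BROKEN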
pthread_sigmask_code = '''
  #include <sys/types.h>
  #include <signal.h>

  int main() {
  #ifdef pthread_sigmask
    int (*foo)(int, const sigset_t *, sigset_t *) = &pthread_sigmask;
    return !foo;
  #endif

    return 0;
  }
'''
if not cc.compiles(pthread_sigmask_code)
  conf.set('FUNC_PTHREAD_SIGMASK_BROKEN', 1)
endif

udev_version = '219'
udev_dep = dependency('libudev', version: '>=' + udev_version, required: get_option('udev'))
if udev_dep.found()
  conf.set('WITH_UDEV', 1)
endif

libutil_dep = cc.find_library('util', required: false)

if host_machine.system() == 'windows'
  ole32_dep = cc.find_library('ole32')
  oleaut32_dep = cc.find_library('oleaut32')
  winsock2_dep = cc.find_library('ws2_32')
  win32_dep = declare_dependency(
    dependencies: [
      ole32_dep,
      oleaut32_dep,
      winsock2_dep,
    ],
  )
  if get_option('default_library') == 'static'
    win32_flags = [ '-DLIBVIRT_STATIC' ]
  else
    win32_flags = []
  endif
  win32_link_flags = [ '-Wl,-no-undefined' ]
else
  win32_dep = dependency('', required: false)
  win32_flags = []
  win32_link_flags = []
endif

wireshark_version = '2.6.0'
wireshark_dep = dependency('wireshark', version: '>=' + wireshark_version, required: get_option('wireshark_dissector'))
if wireshark_dep.found()
  if not xdr_dep.found()
    if get_option('wireshark_dissector').enabled()
      error('XDR is required for wireshark plugin')
    else
      wireshark_dep = dependency('', required: false)
    endif
  endif
endif

if wireshark_dep.found()
  wireshark_plugindir = get_option('wireshark_plugindir')
  if wireshark_plugindir == ''
    wireshark_plugindir = wireshark_dep.get_variable(pkgconfig : 'plugindir')
  endif

  wireshark_prefix = wireshark_dep.get_variable(pkgconfig : 'prefix')
  if wireshark_prefix == ''
    # If wireshark's prefix cannot be retrieved from pkg-config,
    # this is our best bet.
    wireshark_prefix = '/usr'
  endif
  # Replace wireshark's prefix with our own.
  # There is no replace method in meson so we have to work around it.
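  # e.g. a pkg-config plugindir of '/usr/lib/wireshark/plugins' with a
  # wireshark prefix of '/usr' is rewritten below to live under our own prefix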
  rc = run_command(
    'python3', '-c',
    'print("@0@".replace("@1@", "@2@"))'.format(
      wireshark_plugindir, wireshark_prefix, prefix,
    ),
    check: true,
  )
  wireshark_plugindir = rc.stdout().strip()

  # Since wireshark 2.5.0 plugins can't live in the top-level plugindir but
  # have to be under one of the ["epan", "wiretap", "codecs"] subdirs.
  # The first one looks okay.
  wireshark_plugindir = wireshark_plugindir / 'epan'

  # Wireshark is installing ws_version.h since v2.9.0, but some distributions
  # are not shipping it.
  if cc.check_header('wireshark/ws_version.h')
    conf.set('WITH_WS_VERSION', 1)
  endif
endif


# generic build dependency checks

if bash_completion_dep.found() and not readline_dep.found()
  if get_option('bash_completion').enabled()
    error('readline is required for bash completion support')
  else
    bash_completion_dep = dependency('', required: false)
  endif
endif
if bash_completion_dep.found()
  bash_completion_dir = get_option('bash_completion_dir')
  if bash_completion_dir == ''
    bash_completion_dir = bash_completion_dep.get_variable(pkgconfig : 'completionsdir')
    bash_completion_prefix = bash_completion_dep.get_variable(pkgconfig : 'prefix')
    rc = run_command(
      'python3', '-c',
      'print("@0@".replace("@1@", "@2@"))'.format(
        bash_completion_dir, bash_completion_prefix, prefix,
      ),
      check: true,
    )
    bash_completion_dir = rc.stdout().strip()
  endif
endif

if not get_option('firewalld').disabled()
  firewalld_enable = true

  if host_machine.system() != 'linux'
    firewalld_enable = false
    if get_option('firewalld').enabled()
      error('firewalld support can only be enabled on Linux')
    endif
  endif

  if firewalld_enable
    conf.set('WITH_FIREWALLD', 1)
  endif
endif

if not get_option('firewalld_zone').disabled() and conf.has('WITH_FIREWALLD')
  conf.set('WITH_FIREWALLD_ZONE', 1)
elif get_option('firewalld_zone').enabled()
  error('You must have firewalld support enabled to enable firewalld_zone')
endif

if not get_option('polkit').disabled()
  polkit_enable = true

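  # with -Dpolkit=auto, only enable support if the pkcheck binary is installed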
  if get_option('polkit').auto()
    pkcheck_prog = find_program('pkcheck', required: false, dirs: libvirt_sbin_path)
    polkit_enable = pkcheck_prog.found()
  endif

  if host_machine.system() == 'windows'
    polkit_enable = false
    if get_option('polkit').enabled()
      error('polkit support cannot be enabled on Windows')
    endif
  endif

  if polkit_enable
    conf.set('WITH_POLKIT', 1)
  endif
endif

if udev_dep.found() and not pciaccess_dep.found()
  error('You must install the pciaccess module to build with udev')
endif


# build driver options

remote_default_mode = get_option('remote_default_mode')
if remote_default_mode == 'direct'
  conf.set('REMOTE_DRIVER_AUTOSTART_DIRECT', '1')
endif

if not get_option('driver_libvirtd').disabled()
  use_libvirtd = true

  if host_machine.system() == 'windows'
    use_libvirtd = false
    if get_option('driver_libvirtd').enabled()
      error('libvirtd daemon is not supported on windows')
    endif
  endif

  if not conf.has('WITH_REMOTE')
    use_libvirtd = false
    if get_option('driver_libvirtd').enabled()
      error('remote driver is required for libvirtd daemon')
    endif
  endif

  if use_libvirtd
    conf.set('WITH_LIBVIRTD', 1)
  endif
endif

if not get_option('driver_bhyve').disabled() and host_machine.system() == 'freebsd'
  bhyve_prog = find_program('bhyve', required: get_option('driver_bhyve'))
  bhyvectl_prog = find_program('bhyvectl', required: get_option('driver_bhyve'))
  bhyveload_prog = find_program('bhyveload', required: get_option('driver_bhyve'))

  if bhyve_prog.found() and bhyvectl_prog.found() and bhyveload_prog.found()
    conf.set('WITH_BHYVE', 1)
    conf.set_quoted('BHYVE', bhyve_prog.full_path())
    conf.set_quoted('BHYVECTL', bhyvectl_prog.full_path())
    conf.set_quoted('BHYVELOAD', bhyveload_prog.full_path())
  endif
elif get_option('driver_bhyve').enabled()
  error('The bhyve driver cannot be enabled')
endif

if not get_option('driver_esx').disabled() and curl_dep.found()
  conf.set('WITH_ESX', 1)
  conf.set('WITH_VMX', 1)
elif get_option('driver_esx').enabled()
  error('Curl is required for the ESX driver')
endif

if not get_option('driver_hyperv').disabled() and openwsman_dep.found()
  conf.set('WITH_HYPERV', 1)
elif get_option('driver_hyperv').enabled()
  error('openwsman is required for the Hyper-V driver')
endif

if not get_option('driver_interface').disabled() and conf.has('WITH_LIBVIRTD') and (udev_dep.found() or conf.has('WITH_NETCF'))
  conf.set('WITH_INTERFACE', 1)
elif get_option('driver_interface').enabled()
  error('Requested the Interface driver without netcf or udev and libvirtd support')
endif

if not get_option('driver_libxl').disabled() and conf.has('WITH_LIBVIRTD')
  libxl_version = '4.9.0'
  libxl_dep = dependency('xenlight', version: '>=' + libxl_version, required: get_option('driver_libxl'))

  if libxl_dep.found()
    libxl_firmware_dir = libxl_dep.get_variable(pkgconfig : 'xenfirmwaredir', default_value: '')
    libxl_execbin = libxl_dep.get_variable(pkgconfig : 'libexec_bin', default_value: '')
    if libxl_firmware_dir != ''
      conf.set_quoted('LIBXL_FIRMWARE_DIR', libxl_firmware_dir)
    endif
    if libxl_execbin != ''
      conf.set_quoted('LIBXL_EXECBIN_DIR', libxl_execbin)
    endif

    # If building with libxl, use the libxl utility header and lib too
    if cc.check_header('libxlutil.h')
      conf.set('WITH_LIBXLUTIL_H', 1)
    endif
    xl_util_dep = dependency('xlutil')
    xen_store_dep = dependency('xenstore')
    xtl_link_dep = dependency('xentoollog')

    # Upstream Xen failed to advertise LIBXL_API_VERSION 0x040700 and
    # 0x040800 until the Xen 4.13 release. For Xen versions 4.9-4.12
    # we'll need to stick with version 0x040500.
    if libxl_dep.version().version_compare('>=4.13.0')
      LIBXL_API_VERSION = '0x041300'
    else
      LIBXL_API_VERSION = '0x040500'
    endif
    libxl_dep = declare_dependency(
      compile_args: '-DLIBXL_API_VERSION=' + LIBXL_API_VERSION,
      dependencies: [
        libxl_dep,
        xtl_link_dep,
        xl_util_dep,
        xen_store_dep,
      ],
    )

    # Check if Xen has support for PVH
    if cc.has_header_symbol('libxl.h', 'LIBXL_DOMAIN_TYPE_PVH')
      conf.set('WITH_XEN_PVH', 1)
    endif

    conf.set('WITH_LIBXL', 1)
  endif
elif get_option('driver_libxl').enabled()
  error('libvirtd is required for libxenlight')
endif

if not get_option('driver_lxc').disabled() and host_machine.system() == 'linux' and conf.has('WITH_LIBVIRTD')
  conf.set('WITH_LXC', 1)
elif get_option('driver_lxc').enabled()
  error('linux and remote_driver are required for LXC')
endif

if not get_option('driver_ch').disabled() and host_machine.system() == 'linux' and (host_machine.cpu_family() == 'x86_64' or host_machine.cpu_family() == 'aarch64')
  use_ch = true

  if not conf.has('WITH_LIBVIRTD')
    use_ch = false
    if get_option('driver_ch').enabled()
      error('libvirtd is required to build Cloud-Hypervisor driver')
    endif
  endif

  if not json_c_dep.found()
    use_ch = false
    if get_option('driver_ch').enabled()
      error('json-c is required to build Cloud-Hypervisor driver')
    endif
  endif

  if not curl_dep.found()
    use_ch = false
    if get_option('driver_ch').enabled()
      error('curl is required to build Cloud-Hypervisor driver')
    endif
  endif

  if use_ch
    conf.set('WITH_CH', 1)

    default_ch_user = 'root'
    default_ch_group = 'root'
    ch_user = get_option('ch_user')
    if ch_user == ''
      ch_user = default_ch_user
    endif
    ch_group = get_option('ch_group')
    if ch_group == ''
      ch_group = default_ch_group
    endif
    conf.set_quoted('CH_USER', ch_user)
    conf.set_quoted('CH_GROUP', ch_group)
  endif
endif

if not get_option('driver_network').disabled() and conf.has('WITH_LIBVIRTD')
  conf.set('WITH_NETWORK', 1)

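  # Compile-time probe order for the firewall backend: at runtime the first
  # backend whose tools are installed is used, unless network.conf sets
  # firewall_backend explicitly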
  firewall_backend_priority = get_option('firewall_backend_priority')
  if firewall_backend_priority.length() == 0
    if host_machine.system() == 'linux'
      firewall_backend_priority = ['nftables', 'iptables']
    else
      # No firewall impl on non-Linux so far, so force 'none' as placeholder
      firewall_backend_priority = ['none']
    endif
  else
    if host_machine.system() != 'linux'
      error('firewall backend priority only supported on linux hosts')
    endif
  endif

  backends = []
  foreach backend: firewall_backend_priority
    backend = 'VIR_FIREWALL_BACKEND_' + backend.to_upper()
    backends += backend
  endforeach

  conf.set('FIREWALL_BACKENDS', ', '.join(backends))
elif get_option('driver_network').enabled()
  error('libvirtd must be enabled to build the network driver')
endif

if udev_dep.found() and conf.has('WITH_LIBVIRTD')
  conf.set('WITH_NODE_DEVICES', 1)
endif

if not get_option('driver_openvz').disabled() and host_machine.system() == 'linux'
  conf.set('WITH_OPENVZ', 1)
elif get_option('driver_openvz').enabled()
  error('OpenVZ driver can be enabled on Linux only')
endif

if not get_option('driver_qemu').disabled()
  use_qemu = true

  if not json_c_dep.found()
    use_qemu = false
    if get_option('driver_qemu').enabled()
      error('json-c is required to build QEMU driver')
    endif
  endif

  if not conf.has('WITH_LIBVIRTD')
    use_qemu = false
    if get_option('driver_qemu').enabled()
      error('libvirtd is required to build QEMU driver')
    endif
  endif

  if use_qemu
    conf.set('WITH_QEMU', 1)

    qemu_moddir = get_option('qemu_moddir')
    if qemu_moddir == ''
      qemu_moddir = libdir / 'qemu'
    endif
    conf.set_quoted('QEMU_MODDIR', qemu_moddir)

    qemu_datadir = get_option('qemu_datadir')
    if qemu_datadir == ''
      qemu_datadir = datadir / 'qemu'
    endif
    conf.set_quoted('QEMU_DATADIR', qemu_datadir)

    qemu_user = get_option('qemu_user')
    qemu_group = get_option('qemu_group')
    if (qemu_user == '' and qemu_group != '') or (qemu_user != '' and qemu_group == '')
      error('Please specify both qemu_user and qemu_group or neither of them')
    endif
    if qemu_user == '' and qemu_group == ''
      if host_machine.system() in [ 'freebsd', 'darwin' ]
        qemu_user = 'root'
        qemu_group = 'wheel'
      else
        # RHEL and CentOS both have ID_LIKE=fedora, SLES has ID_LIKE=suse
        if (os_release.contains('fedora') or
            os_release.contains('gentoo') or
            os_release.contains('suse'))
          qemu_user = 'qemu'
          qemu_group = 'qemu'
        # Ubuntu has ID_LIKE=debian so we need to handle it first
        elif os_release.contains('ubuntu')
          qemu_user = 'libvirt-qemu'
          qemu_group = 'kvm'
        elif (os_release.contains('arch') or
              os_release.contains('debian'))
          qemu_user = 'libvirt-qemu'
          qemu_group = 'libvirt-qemu'
        else
          qemu_user = 'root'
          qemu_group = 'root'
        endif
      endif
    endif
    conf.set_quoted('QEMU_USER', qemu_user)
    conf.set_quoted('QEMU_GROUP', qemu_group)

    qemu_slirp_prog = find_program(
      'slirp-helper',
      dirs: [ '/usr/bin', '/usr/libexec' ],
      required: false
    )
    if qemu_slirp_prog.found()
      qemu_slirp_path = qemu_slirp_prog.full_path()
    else
      qemu_slirp_path = '/usr/bin/slirp-helper'
    endif
    conf.set_quoted('QEMU_SLIRP_HELPER', qemu_slirp_path)
  endif
endif

2020-03-27 17:14:18 +00:00
|
|
|
if not get_option('driver_secrets').disabled() and conf.has('WITH_LIBVIRTD')
|
|
|
|
conf.set('WITH_SECRETS', 1)
|
|
|
|
endif
|
|
|
|
|
2021-05-26 15:46:26 +00:00
|
|
|
if not get_option('driver_test').disabled()
|
2020-04-29 23:05:43 +00:00
|
|
|
conf.set('WITH_TEST', 1)
|
|
|
|
endif
|
|
|
|
|
2020-06-16 21:47:58 +00:00
|
|
|
if not get_option('driver_vbox').disabled() and conf.has('WITH_LIBVIRTD')
|
|
|
|
conf.set('WITH_VBOX', 1)
|
|
|
|
conf.set_quoted('VBOX_XPCOMC_DIR', get_option('vbox_xpcomc_dir'))
|
|
|
|
endif
|
|
|
|
|
2021-01-05 09:18:57 +00:00
|
|
|
if not get_option('driver_vmware').disabled()
|
2020-04-29 23:08:19 +00:00
|
|
|
conf.set('WITH_VMWARE', 1)
|
|
|
|
conf.set('WITH_VMX', 1)
|
|
|
|
endif
|
|
|
|
|
2020-06-19 09:57:23 +00:00
|
|
|
if not get_option('driver_vz').disabled() and parallels_sdk_dep.found()
|
|
|
|
conf.set('WITH_VZ', 1)
|
|
|
|
elif get_option('driver_vz').enabled()
|
|
|
|
error('Parallels Virtualization SDK is needed to build the Virtuozzo driver.')
|
|
|
|
endif
|
|
|
|
|
2020-06-24 09:01:08 +00:00
|
|
|
if not get_option('secdriver_apparmor').disabled() and apparmor_dep.found()
|
|
|
|
conf.set('WITH_SECDRIVER_APPARMOR', 1)
|
|
|
|
elif get_option('secdriver_apparmor').enabled()
|
|
|
|
error('You must install the AppArmor development package in order to compile libvirt.')
|
|
|
|
endif
|
|
|
|
|
|
|
|
if not get_option('secdriver_selinux').disabled() and selinux_dep.found()
|
|
|
|
conf.set('WITH_SECDRIVER_SELINUX', 1)
|
|
|
|
elif get_option('secdriver_selinux').enabled()
|
|
|
|
error('You must install the libselinux development package in order to compile libvirt.')
|
|
|
|
endif
|
|
|
|
|
2020-04-30 11:35:50 +00:00
|
|
|
if conf.has('WITH_QEMU') or conf.has('WITH_LXC') or conf.has('WITH_NETWORK')
|
|
|
|
conf.set('WITH_BRIDGE', 1)
|
|
|
|
endif
|
|
|
|
|
2020-07-29 12:22:35 +00:00
|
|
|
|
2020-04-24 13:14:37 +00:00
|
|
|
# check for storage drivers
|
|
|
|
|
|
|
|
use_storage = false
|
|
|
|
|
2020-04-30 08:43:08 +00:00
|
|
|
if conf.has('WITH_LIBVIRTD')
|
|
|
|
if not get_option('storage_dir').disabled()
|
|
|
|
use_storage = true
|
|
|
|
conf.set('WITH_STORAGE_DIR', 1)
|
|
|
|
endif
|
2020-06-24 09:53:47 +00:00
|
|
|
|
|
|
|
if not get_option('storage_disk').disabled() and devmapper_dep.found() and libparted_dep.found()
|
|
|
|
use_storage = true
|
|
|
|
conf.set('WITH_STORAGE_DISK', 1)
|
|
|
|
elif get_option('storage_disk').enabled()
|
|
|
|
error('You must install libparted and libdevmapper to compile libvirt with disk storage driver')
|
|
|
|
endif
|
2020-04-30 08:55:52 +00:00
|
|
|
|
|
|
|
if not get_option('storage_fs').disabled()
|
|
|
|
fs_enable = true
|
|
|
|
|
|
|
|
# storage-fs does not work on macOS
|
|
|
|
if host_machine.system() == 'darwin'
|
|
|
|
fs_enable = false
|
|
|
|
endif
|
|
|
|
|
meson: Check header usability
This fixes cross-building in some scenarios.
Specifically, when building for armv7l on x86_64, has_header()
will see the x86_64 version of the linux/kvm.h header and
consider it to be usable. Later, when an attempt is made to
actually include it, the compiler will quickly realize that
things can't quite work.
The reason why we haven't hit this in our CI is that we only ever
install the foreign version of header files. When building the
Debian package, however, some of the Debian-specific tooling will
bring in the native version of the Linux headers in addition to
the foreign one, causing meson to misreport the header's
availability status.
Checking for actual usability, as opposed to mere presence, of
headers is enough to make things work correctly in all cases.
The meson documentation recommends using has_header() instead of
check_header() whenever possible for performance reasons, but
while testing this change on fairly old and underpowered hardware
I haven't been able to measure any meaningful slowdown.
https://bugs.debian.org/1024504
Suggested-by: Helmut Grohne <helmut@subdivi.de>
Signed-off-by: Andrea Bolognani <abologna@redhat.com>
Reviewed-by: Michal Privoznik <mprivozn@redhat.com>
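To illustrate the distinction: has_header() only verifies that the named header exists on the include path, whereas check_header() also compiles a small test program that includes it, so a header installed for the wrong architecture is reported as unusable. A minimal hedged sketch follows; the conf key is hypothetical and not necessarily one this project defines:

    if cc.check_header('linux/kvm.h')
      # the test inclusion is compiled, so an x86_64-only copy of the
      # header is rejected when cross-building for armv7l
      conf.set('WITH_LINUX_KVM_H', 1)
    endif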
2023-04-27 09:30:59 +00:00
|
|
|
if fs_enable and not cc.check_header('mntent.h')
|
2020-04-30 08:55:52 +00:00
|
|
|
if get_option('storage_fs').enabled()
|
|
|
|
error('<mntent.h> is required for the FS storage driver')
|
|
|
|
else
|
|
|
|
fs_enable = false
|
|
|
|
endif
|
|
|
|
endif
|
|
|
|
|
|
|
|
if fs_enable
|
|
|
|
mount_prog = find_program('mount', required: get_option('storage_fs'), dirs: libvirt_sbin_path)
|
|
|
|
umount_prog = find_program('umount', required: get_option('storage_fs'), dirs: libvirt_sbin_path)
|
|
|
|
mkfs_prog = find_program('mkfs', required: get_option('storage_fs'), dirs: libvirt_sbin_path)
|
|
|
|
|
|
|
|
if not mount_prog.found() or not umount_prog.found() or not mkfs_prog.found()
|
|
|
|
fs_enable = false
|
|
|
|
endif
|
|
|
|
endif
|
|
|
|
|
|
|
|
if fs_enable
|
|
|
|
use_storage = true
|
|
|
|
|
|
|
|
conf.set('WITH_STORAGE_FS', 1)
|
2022-10-07 07:43:33 +00:00
|
|
|
conf.set_quoted('MOUNT', mount_prog.full_path())
|
|
|
|
conf.set_quoted('UMOUNT', umount_prog.full_path())
|
|
|
|
conf.set_quoted('MKFS', mkfs_prog.full_path())
|
2020-04-30 08:55:52 +00:00
|
|
|
endif
|
|
|
|
endif
|
2020-04-30 09:07:59 +00:00
|
|
|
|
|
|
|
if not get_option('storage_gluster').disabled() and glusterfs_dep.found()
|
|
|
|
use_storage = true
|
|
|
|
conf.set('WITH_STORAGE_GLUSTER', 1)
|
|
|
|
elif get_option('storage_gluster').enabled()
|
|
|
|
error('Need glusterfs (libgfapi) for gluster storage driver')
|
|
|
|
endif
|
2020-04-30 09:09:45 +00:00
|
|
|
|
|
|
|
if not get_option('storage_iscsi').disabled() and iscsiadm_prog.found()
|
|
|
|
use_storage = true
|
|
|
|
conf.set('WITH_STORAGE_ISCSI', 1)
|
|
|
|
elif get_option('storage_iscsi').enabled()
|
|
|
|
error('We need iscsiadm for iSCSI storage driver')
|
|
|
|
endif
|
2020-04-30 08:59:58 +00:00
|
|
|
|
|
|
|
if not get_option('storage_iscsi_direct').disabled() and libiscsi_dep.found()
|
|
|
|
use_storage = true
|
|
|
|
conf.set('WITH_STORAGE_ISCSI_DIRECT', 1)
|
|
|
|
elif get_option('storage_iscsi_direct').enabled()
|
|
|
|
error('Need libiscsi for iscsi-direct storage driver')
|
|
|
|
endif
|
2020-04-30 09:00:49 +00:00
|
|
|
|
|
|
|
if not get_option('storage_lvm').disabled()
|
|
|
|
lvm_enable = true
|
|
|
|
lvm_progs = [
|
|
|
|
'pvcreate', 'vgcreate', 'lvcreate',
|
|
|
|
'pvremove', 'vgremove', 'lvremove',
|
|
|
|
'lvchange', 'vgchange', 'vgscan',
|
|
|
|
'pvs', 'vgs', 'lvs',
|
|
|
|
]
|
|
|
|
foreach name : lvm_progs
|
|
|
|
set_variable(
|
|
|
|
'@0@_prog'.format(name),
|
|
|
|
find_program(name, required: get_option('storage_lvm'), dirs: libvirt_sbin_path)
|
|
|
|
)
|
|
|
|
if not get_variable('@0@_prog'.format(name)).found()
|
|
|
|
lvm_enable = false
|
|
|
|
endif
|
|
|
|
endforeach
|
|
|
|
|
|
|
|
if lvm_enable
|
|
|
|
use_storage = true
|
|
|
|
conf.set('WITH_STORAGE_LVM', 1)
|
|
|
|
|
|
|
|
foreach name : lvm_progs
|
2022-10-07 07:43:33 +00:00
|
|
|
conf.set_quoted(name.to_upper(), get_variable('@0@_prog'.format(name)).full_path())
|
2020-04-30 09:00:49 +00:00
|
|
|
endforeach
|
|
|
|
endif
|
|
|
|
endif
|
2020-04-30 09:01:46 +00:00
|
|
|
|
|
|
|
if not get_option('storage_mpath').disabled() and host_machine.system() == 'linux' and devmapper_dep.found()
|
|
|
|
use_storage = true
|
|
|
|
conf.set('WITH_STORAGE_MPATH', 1)
|
|
|
|
elif get_option('storage_mpath').enabled()
|
|
|
|
error('mpath storage driver is supported only on Linux and you must install libdevmapper')
|
|
|
|
endif
|
2020-06-19 15:09:22 +00:00
|
|
|
|
|
|
|
if not get_option('storage_rbd').disabled() and rbd_dep.found()
|
|
|
|
use_storage = true
|
|
|
|
conf.set('WITH_STORAGE_RBD', 1)
|
|
|
|
elif get_option('storage_rbd').enabled()
|
|
|
|
error('You must install the librbd library & headers to compile libvirt')
|
|
|
|
endif
|
2020-04-30 09:31:33 +00:00
|
|
|
|
|
|
|
if not get_option('storage_scsi').disabled() and host_machine.system() == 'linux'
|
|
|
|
use_storage = true
|
|
|
|
conf.set('WITH_STORAGE_SCSI', 1)
|
|
|
|
endif
|
2020-04-30 09:10:07 +00:00
|
|
|
|
2020-04-30 09:11:21 +00:00
|
|
|
if not get_option('storage_vstorage').disabled()
|
2021-01-19 13:34:26 +00:00
|
|
|
vstorage_enable = true
|
|
|
|
if host_machine.system() != 'linux'
|
2021-05-26 15:47:58 +00:00
|
|
|
vstorage_enable = false
|
2021-05-26 15:47:05 +00:00
|
|
|
if get_option('storage_vstorage').enabled()
|
2021-01-19 13:34:26 +00:00
|
|
|
error('Vstorage is supported only on Linux')
|
|
|
|
endif
|
|
|
|
endif
|
|
|
|
|
|
|
|
if vstorage_enable
|
|
|
|
use_storage = true
|
|
|
|
conf.set('WITH_STORAGE_VSTORAGE', 1)
|
|
|
|
endif
|
2020-04-30 09:11:21 +00:00
|
|
|
endif
|
2020-04-30 09:12:03 +00:00
|
|
|
|
|
|
|
if not get_option('storage_zfs').disabled()
|
2021-09-14 07:38:44 +00:00
|
|
|
use_storage = true
|
|
|
|
conf.set('WITH_STORAGE_ZFS', 1)
|
2020-04-30 09:12:03 +00:00
|
|
|
endif
|
2020-04-30 08:43:08 +00:00
|
|
|
endif
|
|
|
|
|
2020-04-24 13:14:37 +00:00
|
|
|
if use_storage
|
|
|
|
conf.set('WITH_STORAGE', 1)
|
|
|
|
endif
|
|
|
|
|
|
|
|
|
2020-07-24 14:43:48 +00:00
|
|
|
# build feature options
|
|
|
|
|
|
|
|
chrdev_lock_files = get_option('chrdev_lock_files')
|
|
|
|
if chrdev_lock_files == '' and host_machine.system() == 'linux'
|
|
|
|
chrdev_lock_files = '/var/lock'
|
|
|
|
endif
|
|
|
|
if chrdev_lock_files != ''
|
|
|
|
conf.set_quoted('VIR_CHRDEV_LOCK_FILE_PATH', chrdev_lock_files)
|
|
|
|
endif
|
|
|
|
|
2020-07-24 14:44:39 +00:00
|
|
|
driver_modules_flags = []
|
|
|
|
if conf.has('WITH_LIBVIRTD')
|
2020-09-01 11:27:44 +00:00
|
|
|
if not conf.has('WITH_DLFCN_H') or not dlopen_dep.found()
|
2020-07-24 14:44:39 +00:00
|
|
|
error('You must have dlfcn.h / dlopen() support to build driver modules')
|
|
|
|
endif
|
|
|
|
driver_modules_flags = libvirt_export_dynamic
|
|
|
|
endif
|
|
|
|
|
2020-07-24 14:44:59 +00:00
|
|
|
if host_machine.system() == 'linux'
|
|
|
|
dtrace_prog = find_program('dtrace', required: get_option('dtrace'), dirs: libvirt_sbin_path)
|
|
|
|
if dtrace_prog.found()
|
|
|
|
conf.set('WITH_DTRACE_PROBES', 1)
|
|
|
|
endif
|
2021-01-18 23:08:23 +00:00
|
|
|
dtrace_command = [ 'env', 'CC=' + ' '.join(meson.get_compiler('c').cmd_array()), dtrace_prog ]
|
2020-07-24 14:44:59 +00:00
|
|
|
endif
|
|
|
|
|
2020-07-24 14:45:36 +00:00
|
|
|
if not get_option('host_validate').disabled() and host_machine.system() != 'windows'
|
|
|
|
conf.set('WITH_HOST_VALIDATE', 1)
|
|
|
|
elif get_option('host_validate').enabled()
|
|
|
|
error('virt-host-validate is not supported on Windows')
|
|
|
|
endif
|
|
|
|
|
2020-07-16 15:36:03 +00:00
|
|
|
if get_option('init_script') == 'check'
|
|
|
|
if meson.is_cross_build()
|
|
|
|
init_script = 'none'
|
|
|
|
elif find_program('systemctl', required: false).found()
|
|
|
|
init_script = 'systemd'
|
|
|
|
elif find_program('openrc', required: false).found()
|
|
|
|
init_script = 'openrc'
|
|
|
|
else
|
|
|
|
init_script = 'none'
|
|
|
|
endif
|
|
|
|
else
|
|
|
|
init_script = get_option('init_script')
|
|
|
|
endif
|
|
|
|
|
2020-07-24 14:45:58 +00:00
|
|
|
loader_nvram = get_option('loader_nvram')
|
|
|
|
if loader_nvram != ''
|
|
|
|
if (loader_nvram.split(':').length() % 2) != 0
|
|
|
|
error('Malformed loader_nvram option')
|
|
|
|
endif
|
|
|
|
conf.set_quoted('DEFAULT_LOADER_NVRAM', loader_nvram)
|
|
|
|
endif
|
|
|
|
|
2020-07-16 16:09:20 +00:00
|
|
|
if not get_option('login_shell').disabled() and host_machine.system() == 'linux'
|
|
|
|
conf.set('WITH_LOGIN_SHELL', 1)
|
|
|
|
elif get_option('login_shell').enabled()
|
|
|
|
error('virt-login-shell is supported on Linux only')
|
|
|
|
endif
|
|
|
|
|
2020-06-24 11:27:59 +00:00
|
|
|
if not get_option('nss').disabled()
|
|
|
|
use_nss = true
|
2024-09-04 14:10:17 +00:00
|
|
|
if not json_c_dep.found()
|
2020-06-24 11:27:59 +00:00
|
|
|
if get_option('nss').enabled()
|
2024-09-04 14:10:17 +00:00
|
|
|
error('Can\'t build nss plugin without json-c')
|
2020-06-24 11:27:59 +00:00
|
|
|
else
|
|
|
|
use_nss = false
|
|
|
|
endif
|
|
|
|
endif
|
|
|
|
|
|
|
|
if use_nss and not conf.has('WITH_NETWORK')
|
|
|
|
if get_option('nss').enabled()
|
|
|
|
error('Can\'t build nss plugin without network')
|
|
|
|
else
|
|
|
|
use_nss = false
|
|
|
|
endif
|
|
|
|
endif
|
|
|
|
|
meson: Check header usability
This fixes cross-building in some scenarios.
Specifically, when building for armv7l on x86_64, has_header()
will see the x86_64 version of the linux/kvm.h header and
consider it to be usable. Later, when an attempt is made to
actually include it, the compiler will quickly realize that
things can't quite work.
The reason why we haven't hit this in our CI is that we only ever
install the foreign version of header files. When building the
Debian package, however, some of the Debian-specific tooling will
bring in the native version of the Linux headers in addition to
the foreign one, causing meson to misreport the header's
availability status.
Checking for actual usability, as opposed to mere presence, of
headers is enough to make things work correctly in all cases.
The meson documentation recommends using has_header() instead of
check_header() whenever possible for performance reasons, but
while testing this change on fairly old and underpowered hardware
I haven't been able to measure any meaningful slowdown.
https://bugs.debian.org/1024504
Suggested-by: Helmut Grohne <helmut@subdivi.de>
Signed-off-by: Andrea Bolognani <abologna@redhat.com>
Reviewed-by: Michal Privoznik <mprivozn@redhat.com>
2023-04-27 09:30:59 +00:00
|
|
|
if use_nss and not cc.check_header('nss.h')
|
2020-06-24 11:27:59 +00:00
|
|
|
if get_option('nss').enabled()
|
|
|
|
error('Can\'t build nss plugin without nss.h')
|
|
|
|
else
|
|
|
|
use_nss = false
|
|
|
|
endif
|
|
|
|
endif
|
|
|
|
|
|
|
|
if use_nss
|
|
|
|
conf.set('WITH_NSS', 1)
|
|
|
|
|
|
|
|
if cc.has_type('struct gaih_addrtuple', prefix: '#include <nss.h>')
|
2020-09-01 11:27:44 +00:00
|
|
|
conf.set('WITH_STRUCT_GAIH_ADDRTUPLE', 1)
|
2020-06-24 11:27:59 +00:00
|
|
|
endif
|
|
|
|
|
|
|
|
if (cc.has_type('ns_mtab', prefix: '#include <nsswitch.h>') and
|
|
|
|
cc.has_type('nss_module_unregister_fn', prefix: '#include <nsswitch.h>'))
|
|
|
|
conf.set('WITH_BSD_NSS', 1)
|
|
|
|
endif
|
|
|
|
endif
|
|
|
|
endif
|
|
|
|
|
2020-07-01 00:47:06 +00:00
|
|
|
if not get_option('numad').disabled() and numactl_dep.found()
|
|
|
|
numad_prog = find_program('numad', required: get_option('numad'), dirs: libvirt_sbin_path)
|
|
|
|
if numad_prog.found()
|
2020-09-01 11:27:44 +00:00
|
|
|
conf.set('WITH_NUMAD', 1)
|
2022-10-07 07:43:33 +00:00
|
|
|
conf.set_quoted('NUMAD', numad_prog.full_path())
|
2020-07-01 00:47:06 +00:00
|
|
|
endif
|
|
|
|
elif get_option('numad').enabled()
|
|
|
|
error('You must have numactl enabled for numad support.')
|
|
|
|
endif
|
|
|
|
|
2020-04-30 09:30:54 +00:00
|
|
|
# nwfilter should only be compiled for linux, and only if the
|
|
|
|
# libvirt daemon is also being compiled
|
|
|
|
if conf.has('WITH_LIBVIRTD') and host_machine.system() == 'linux'
|
|
|
|
conf.set('WITH_NWFILTER', 1)
|
|
|
|
endif
|
|
|
|
|
2020-07-01 00:58:23 +00:00
|
|
|
if not get_option('pm_utils').disabled()
|
|
|
|
use_pm_utils = true
|
2020-09-15 12:22:57 +00:00
|
|
|
if init_script == 'systemd'
|
2020-07-01 00:58:23 +00:00
|
|
|
use_pm_utils = false
|
|
|
|
endif
|
|
|
|
|
|
|
|
if use_pm_utils
|
|
|
|
conf.set('WITH_PM_UTILS', 1)
|
|
|
|
endif
|
|
|
|
endif
|
|
|
|
|
2024-04-16 14:32:26 +00:00
|
|
|
if not get_option('ssh_proxy').disabled() and conf.has('WITH_DECL_STRUCT_SOCKADDR_VM')
|
|
|
|
conf.set('WITH_SSH_PROXY', 1)
|
|
|
|
elif get_option('ssh_proxy').enabled()
|
|
|
|
error('ssh proxy requires vm_sockets.h which wasn\'t found')
|
|
|
|
endif
|
|
|
|
|
2020-04-29 22:43:59 +00:00
|
|
|
if not get_option('sysctl_config').disabled() and host_machine.system() == 'linux'
|
|
|
|
conf.set('WITH_SYSCTL', 1)
|
|
|
|
elif get_option('sysctl_config').enabled()
|
|
|
|
error('sysctl configuration is supported only on linux')
|
|
|
|
endif
|
|
|
|
|
2024-02-09 14:20:58 +00:00
|
|
|
if not get_option('userfaultfd_sysctl').disabled() and conf.has('WITH_SYSCTL')
|
|
|
|
conf.set('WITH_USERFAULTFD_SYSCTL', 1)
|
|
|
|
elif get_option('userfaultfd_sysctl').enabled()
|
|
|
|
error('userfaultfd_sysctl option requires sysctl_config to be enabled')
|
|
|
|
endif
|
|
|
|
|
2020-07-01 01:07:37 +00:00
|
|
|
conf.set_quoted('TLS_PRIORITY', get_option('tls_priority'))
|
|
|
|
|
2023-10-03 12:46:56 +00:00
|
|
|
|
|
|
|
# test options
|
|
|
|
|
2023-10-03 13:39:02 +00:00
|
|
|
tests_enabled = [ not get_option('tests').disabled() ]
|
|
|
|
if tests_enabled[0] and \
|
2023-10-03 12:46:56 +00:00
|
|
|
cc.get_id() == 'clang' and \
|
|
|
|
not supported_cc_flags.contains('-fsemantic-interposition') \
|
|
|
|
and get_option('optimization') != '0'
|
|
|
|
# If CLang doesn't support -fsemantic-interposition then our
|
|
|
|
# mocking doesn't work. The best we can do is to not run the
|
|
|
|
# test suite.
|
2023-10-03 12:52:45 +00:00
|
|
|
msg = 'Forcibly disabling tests because CLang lacks -fsemantic-interposition. Update CLang or disable optimization'
|
|
|
|
if get_option('tests').enabled()
|
|
|
|
error(msg)
|
|
|
|
endif
|
2023-10-03 13:39:02 +00:00
|
|
|
tests_enabled = [ false, '!!! @0@ !!!'.format(msg) ]
|
2023-10-03 12:46:56 +00:00
|
|
|
endif
|
|
|
|
|
|
|
|
if get_option('expensive_tests').auto()
|
2023-10-03 13:39:02 +00:00
|
|
|
use_expensive_tests = not git and tests_enabled[0]
|
2023-10-03 12:46:56 +00:00
|
|
|
else
|
|
|
|
use_expensive_tests = get_option('expensive_tests').enabled()
|
2023-10-03 13:39:02 +00:00
|
|
|
if use_expensive_tests and not tests_enabled[0]
|
2023-10-03 12:53:08 +00:00
|
|
|
error('cannot enable expensive tests when tests are disabled')
|
|
|
|
endif
|
2023-10-03 12:46:56 +00:00
|
|
|
endif
|
|
|
|
|
|
|
|
coverage_flags = []
|
|
|
|
if get_option('test_coverage')
|
|
|
|
coverage_flags = [
|
|
|
|
'-fprofile-arcs',
|
|
|
|
'-ftest-coverage',
|
|
|
|
]
|
|
|
|
endif
|
|
|
|
|
|
|
|
|
2020-06-30 18:18:18 +00:00
|
|
|
# Various definitions
|
|
|
|
|
|
|
|
# Python3 < 3.7 treats the C locale as 7-bit only. We must force env vars so
|
|
|
|
# it treats it as UTF-8 regardless of the user's locale.
|
|
|
|
runutf8 = [ 'LC_ALL=', 'LANG=C', 'LC_CTYPE=en_US.UTF-8' ]
|
|
|
|
|
|
|
|
|
2020-08-03 06:50:49 +00:00
|
|
|
# define top include directory
|
|
|
|
|
|
|
|
top_inc_dir = include_directories('.')
|
|
|
|
|
2023-04-17 11:54:01 +00:00
|
|
|
keycodemapdb = subproject('keycodemapdb')
|
2020-08-03 06:50:49 +00:00
|
|
|
|
2020-06-17 22:53:18 +00:00
|
|
|
# include remaining subdirs
|
|
|
|
|
|
|
|
subdir('scripts')
|
|
|
|
|
2020-06-17 22:53:35 +00:00
|
|
|
subdir('include')
|
|
|
|
|
2020-05-05 08:14:34 +00:00
|
|
|
subdir('src')
|
|
|
|
|
2020-06-24 11:32:04 +00:00
|
|
|
subdir('tools')
|
|
|
|
|
2023-10-03 13:39:02 +00:00
|
|
|
if tests_enabled[0]
|
2020-10-08 12:46:03 +00:00
|
|
|
subdir('tests')
|
2023-10-03 12:58:56 +00:00
|
|
|
else
|
|
|
|
# Ensure that 'meson test' fails when tests are disabled, as opposed to
|
|
|
|
# misleadingly succeeding at doing absolutely nothing
|
|
|
|
test(
|
|
|
|
'tests-are-disabled',
|
|
|
|
python3_prog, args: [ '-c', 'raise Exception("tests are disabled")' ],
|
|
|
|
)
|
2020-10-08 12:46:03 +00:00
|
|
|
endif
|
2020-05-21 14:41:32 +00:00
|
|
|
|
2020-06-25 16:14:13 +00:00
|
|
|
subdir('examples')
|
|
|
|
|
2020-06-29 19:55:39 +00:00
|
|
|
subdir('po')
|
|
|
|
|
2020-10-08 12:39:38 +00:00
|
|
|
gen_docs = not get_option('docs').disabled()
|
|
|
|
if gen_docs
|
|
|
|
subdir('docs')
|
|
|
|
endif
|
2020-06-18 00:20:37 +00:00
|
|
|
|
2020-06-25 21:29:43 +00:00
|
|
|
subdir('build-aux')
|
|
|
|
|
2020-06-17 22:53:18 +00:00
|
|
|
|
2020-06-29 19:56:09 +00:00
|
|
|
# install pkgconfig files
|
|
|
|
pkgconfig_files = [
|
|
|
|
'libvirt.pc.in',
|
|
|
|
'libvirt-qemu.pc.in',
|
|
|
|
'libvirt-lxc.pc.in',
|
|
|
|
'libvirt-admin.pc.in',
|
|
|
|
]
|
|
|
|
|
2022-03-29 09:43:36 +00:00
|
|
|
pkgconfig_conf = configuration_data({
|
|
|
|
'VERSION': meson.project_version(),
|
|
|
|
'datadir': datadir,
|
|
|
|
'datarootdir': datadir,
|
|
|
|
'exec_prefix': prefix,
|
|
|
|
'includedir': includedir,
|
|
|
|
'libdir': libdir,
|
|
|
|
'prefix': prefix,
|
|
|
|
})
|
2020-06-29 19:56:09 +00:00
|
|
|
|
|
|
|
pkgconfig_dir = libdir / 'pkgconfig'
|
|
|
|
|
|
|
|
foreach file : pkgconfig_files
|
|
|
|
configure_file(
|
|
|
|
input: file,
|
|
|
|
output: '@BASENAME@',
|
|
|
|
configuration: pkgconfig_conf,
|
|
|
|
install: true,
|
|
|
|
install_dir: pkgconfig_dir,
|
|
|
|
)
|
|
|
|
endforeach
|
|
|
|
|
2020-07-28 15:51:53 +00:00
|
|
|
|
|
|
|
# generate dist files
|
|
|
|
|
|
|
|
if git
|
2022-03-29 09:43:36 +00:00
|
|
|
spec_conf = configuration_data({
|
|
|
|
'VERSION': meson.project_version(),
|
|
|
|
})
|
2020-07-28 15:51:53 +00:00
|
|
|
|
2022-08-08 15:40:56 +00:00
|
|
|
configure_file(
|
|
|
|
input: 'libvirt.spec.in',
|
|
|
|
output: '@BASENAME@',
|
|
|
|
configuration: spec_conf,
|
|
|
|
)
|
2020-07-28 15:51:53 +00:00
|
|
|
|
2022-10-07 07:43:33 +00:00
|
|
|
authors = run_command(python3_prog, meson_gen_authors_prog.full_path(),
|
2022-01-22 19:30:11 +00:00
|
|
|
env: runutf8, check: true)
|
2020-08-25 15:52:24 +00:00
|
|
|
authors_file = 'AUTHORS.rst.in'
|
2020-07-28 15:51:53 +00:00
|
|
|
|
2022-03-29 09:43:36 +00:00
|
|
|
authors_conf = configuration_data({
|
|
|
|
'contributorslist': authors.stdout(),
|
|
|
|
})
|
2020-07-28 15:51:53 +00:00
|
|
|
|
|
|
|
configure_file(
|
|
|
|
input: authors_file,
|
|
|
|
output: '@BASENAME@',
|
|
|
|
configuration: authors_conf,
|
|
|
|
)
|
|
|
|
|
|
|
|
# Using return values from configure_file in add_dist_script is possible since 0.55.0
|
|
|
|
dist_files = [
|
|
|
|
'libvirt.spec',
|
2020-08-25 15:52:24 +00:00
|
|
|
'AUTHORS.rst',
|
2020-07-28 15:51:53 +00:00
|
|
|
]
|
|
|
|
|
|
|
|
foreach file : dist_files
|
|
|
|
meson.add_dist_script(
|
2024-04-29 07:49:17 +00:00
|
|
|
meson_python_prog.full_path(), python3_prog.full_path(),
|
|
|
|
meson_dist_prog.full_path(), file
|
2020-07-28 15:51:53 +00:00
|
|
|
)
|
|
|
|
endforeach
|
|
|
|
endif
|
|
|
|
|
|
|
|
|
2020-08-03 06:50:49 +00:00
|
|
|
# generate meson-config.h file
|
|
|
|
configure_file(output: 'meson-config.h', configuration: conf)
|
2020-04-30 12:56:21 +00:00
|
|
|
|
|
|
|
|
2020-07-01 00:52:45 +00:00
|
|
|
# generate run helper
|
2022-03-29 09:43:36 +00:00
|
|
|
run_conf = configuration_data({
|
2022-10-07 07:31:32 +00:00
|
|
|
'abs_builddir': meson.project_build_root(),
|
|
|
|
'abs_top_builddir': meson.project_build_root(),
|
2022-03-29 09:43:36 +00:00
|
|
|
})
|
|
|
|
|
2020-07-01 00:52:45 +00:00
|
|
|
configure_file(
|
|
|
|
input: 'run.in',
|
2020-08-25 16:30:57 +00:00
|
|
|
output: '@BASENAME@',
|
2020-07-01 00:52:45 +00:00
|
|
|
configuration: run_conf,
|
|
|
|
)
|
2022-01-22 19:30:11 +00:00
|
|
|
run_command('chmod', 'a+x', meson.current_build_dir() / 'run', check: true)
|
2020-07-01 00:52:45 +00:00
|
|
|
|
|
|
|
|
2020-04-30 12:56:21 +00:00
|
|
|
# print configuration summary
|
|
|
|
|
2020-07-29 12:22:35 +00:00
|
|
|
driver_summary = {
|
2024-09-24 07:32:22 +00:00
|
|
|
'Bhyve': conf.has('WITH_BHYVE'),
|
2021-05-12 17:01:31 +00:00
|
|
|
'Cloud-Hypervisor': conf.has('WITH_CH'),
|
2020-07-22 15:53:26 +00:00
|
|
|
'ESX': conf.has('WITH_ESX'),
|
2020-04-30 10:24:29 +00:00
|
|
|
'Hyper-V': conf.has('WITH_HYPERV'),
|
2020-04-28 20:52:30 +00:00
|
|
|
'Interface': conf.has('WITH_INTERFACE'),
|
2024-09-24 07:32:22 +00:00
|
|
|
'Libvirtd': conf.has('WITH_LIBVIRTD'),
|
|
|
|
'libxl': conf.has('WITH_LIBXL'),
|
|
|
|
'LXC': conf.has('WITH_LXC'),
|
|
|
|
'Network': conf.has('WITH_NETWORK'),
|
|
|
|
'OpenVZ': conf.has('WITH_OPENVZ'),
|
|
|
|
'QEMU': conf.has('WITH_QEMU'),
|
|
|
|
'Remote': conf.has('WITH_REMOTE'),
|
|
|
|
'Test': conf.has('WITH_TEST'),
|
|
|
|
'VBox': conf.has('WITH_VBOX'),
|
|
|
|
'VMware': conf.has('WITH_VMWARE'),
|
|
|
|
'vz': conf.has('WITH_VZ'),
|
2020-07-29 12:22:35 +00:00
|
|
|
}
|
|
|
|
summary(driver_summary, section: 'Drivers', bool_yn: true)
|
|
|
|
|
2020-04-30 08:43:08 +00:00
|
|
|
storagedriver_summary = {
|
|
|
|
'Dir': conf.has('WITH_STORAGE_DIR'),
|
2024-09-24 07:32:22 +00:00
|
|
|
'Disk': conf.has('WITH_STORAGE_DISK'),
|
2020-04-30 08:55:52 +00:00
|
|
|
'FS': conf.has('WITH_STORAGE_FS'),
|
2024-09-24 07:32:22 +00:00
|
|
|
'Gluster': conf.has('WITH_STORAGE_GLUSTER'),
|
2020-04-30 09:09:45 +00:00
|
|
|
'iSCSI': conf.has('WITH_STORAGE_ISCSI'),
|
2020-04-30 08:59:58 +00:00
|
|
|
'iscsi-direct': conf.has('WITH_STORAGE_ISCSI_DIRECT'),
|
2024-09-24 07:32:22 +00:00
|
|
|
'LVM': conf.has('WITH_STORAGE_LVM'),
|
2020-04-30 09:01:46 +00:00
|
|
|
'mpath': conf.has('WITH_STORAGE_MPATH'),
|
2024-09-24 07:32:22 +00:00
|
|
|
'NetFS': conf.has('WITH_STORAGE_FS'),
|
2020-06-19 15:09:22 +00:00
|
|
|
'RBD': conf.has('WITH_STORAGE_RBD'),
|
2024-09-24 07:32:22 +00:00
|
|
|
'SCSI': conf.has('WITH_STORAGE_SCSI'),
|
2020-04-30 09:11:21 +00:00
|
|
|
'Virtuozzo storage': conf.has('WITH_STORAGE_VSTORAGE'),
|
2024-09-24 07:32:22 +00:00
|
|
|
'ZFS': conf.has('WITH_STORAGE_ZFS'),
|
2020-04-30 08:43:08 +00:00
|
|
|
}
|
|
|
|
summary(storagedriver_summary, section: 'Storage Drivers', bool_yn: true)
|
|
|
|
|
2020-06-24 09:01:08 +00:00
|
|
|
secdriver_summary = {
|
|
|
|
'AppArmor': conf.has('WITH_SECDRIVER_APPARMOR'),
|
2024-09-24 07:32:22 +00:00
|
|
|
'SELinux': conf.has('WITH_SECDRIVER_SELINUX'),
|
2020-06-24 09:01:08 +00:00
|
|
|
}
|
|
|
|
summary(secdriver_summary, section: 'Security Drivers', bool_yn: true)
|
|
|
|
|
2020-07-24 14:44:39 +00:00
|
|
|
drivermod_summary = {
|
|
|
|
'driver_modules': driver_modules_flags.length() > 0,
|
|
|
|
}
|
|
|
|
summary(drivermod_summary, section: 'Driver Loadable Modules', bool_yn: true)
|
|
|
|
|
2020-03-02 14:14:14 +00:00
|
|
|
libs_summary = {
|
|
|
|
'acl': acl_dep.found(),
|
2020-07-29 12:19:59 +00:00
|
|
|
'apparmor': apparmor_dep.found(),
|
2020-07-29 12:20:15 +00:00
|
|
|
'attr': attr_dep.found(),
|
2020-07-29 12:20:29 +00:00
|
|
|
'audit': audit_dep.found(),
|
2020-06-24 11:24:53 +00:00
|
|
|
'bash_completion': bash_completion_dep.found(),
|
2020-06-24 11:25:04 +00:00
|
|
|
'blkid': blkid_dep.found(),
|
2020-06-24 11:25:16 +00:00
|
|
|
'capng': capng_dep.found(),
|
2020-06-24 11:25:26 +00:00
|
|
|
'curl': curl_dep.found(),
|
2020-10-08 11:51:43 +00:00
|
|
|
'devmapper': devmapper_dep.found(),
|
2020-04-29 08:34:31 +00:00
|
|
|
'dlopen': dlopen_dep.found(),
|
2020-07-01 01:10:10 +00:00
|
|
|
'fuse': fuse_dep.found(),
|
2020-06-24 11:26:27 +00:00
|
|
|
'glusterfs': glusterfs_dep.found(),
|
2024-02-08 15:44:15 +00:00
|
|
|
'json-c': json_c_dep.found(),
|
2024-09-24 07:26:43 +00:00
|
|
|
'libbsd': libbsd_dep.found(),
|
2020-06-24 11:26:48 +00:00
|
|
|
'libiscsi': libiscsi_dep.found(),
|
2020-10-08 11:51:43 +00:00
|
|
|
'libkvm': libkvm_dep.found(),
|
qemu: try to connect to nbdkit early to detect errors
When using nbdkit to serve a network disk source, the nbdkit process
will start and wait for an nbd connection before actually attempting to
connect to the (remote) disk location. Because of this, nbdkit will not
report an error until after qemu is launched and tries to read from the
disk. This results in a fairly user-unfriendly error saying that qemu
was unable to start because "Requested export not available".
Ideally we'd like to be able to tell the user *why* the export is not
available, but this sort of information is only available to nbdkit, not
qemu. It could be because the URL was incorrect, or because of an
authentication failure, or one of many other possibilities.
To make this friendlier for users and easier to detect
misconfigurations, try to connect to nbdkit immediately after starting
nbdkit and before we try to start qemu. This requires adding a
dependency on libnbd. If an error occurs when connecting to nbdkit, read
back from the nbdkit error log and provide that information in the error
report from qemuNbdkitProcessStart().
User-visible change demonstrated below:
Previous error:
$ virsh start nbdkit-test
2023-01-18 19:47:45.778+0000: 30895: error : virNetClientProgramDispatchError:172 : internal
error: process exited while connecting to monitor: 2023-01-18T19:47:45.704658Z
qemu-system-x86_64: -blockdev {"driver":"nbd","server":{"type":"unix",
"path":"/var/lib/libvirt/qemu/domain-1-nbdkit-test/nbdkit-libvirt-1-storage.socket"},
"node-name":"libvirt-1-storage","auto-read-only":true,"discard":"unmap"}: Requested export not
available
error: Failed to start domain 'nbdkit-test'
error: internal error: process exited while connecting to monitor: 2023-01-18T19:47:45.704658Z
qemu-system-x86_64: -blockdev {"driver":"nbd","server":{"type":"unix",
"path":"/var/lib/libvirt/qemu/domain-1-nbdkit-test/nbdkit-libvirt-1-storage.socket"},
"node-name":"libvirt-1-storage","auto-read-only":true,"discard":"unmap"}: Requested export not
available
After this change:
$ virsh start nbdkit-test
2023-01-18 19:44:36.242+0000: 30895: error : virNetClientProgramDispatchError:172 : internal
error: Failed to connect to nbdkit for 'http://localhost:8888/nonexistent.iso': nbdkit: curl[1]:
error: problem doing HEAD request to fetch size of URL [http://localhost:8888/nonexistent.iso]:
HTTP response code said error: The requested URL returned error: 404
error: Failed to start domain 'nbdkit-test'
error: internal error: Failed to connect to nbdkit for 'http://localhost:8888/nonexistent.iso]:
error: problem doing HEAD request to fetch size of URL [http://localhost:8888/nonexistent.iso]:
HTTP response code said error: The requested URL returned error: 404
Signed-off-by: Jonathon Jongsma <jjongsma@redhat.com>
Reviewed-by: Peter Krempa <pkrempa@redhat.com>
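As a rough sketch of the "adding a dependency on libnbd" step (assumptions: the 'nbdkit' option name, the version bound, and tying WITH_NBDKIT directly to libnbd are for illustration only and not taken from this file), an optional dependency probe in Meson typically looks like:

    libnbd_dep = dependency('libnbd', version: '>= 1.0', required: get_option('nbdkit'))
    if libnbd_dep.found()
      conf.set('WITH_NBDKIT', 1)
    endif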
2022-12-16 23:10:49 +00:00
|
|
|
'libnbd': libnbd_dep.found(),
|
2020-04-29 10:08:33 +00:00
|
|
|
'libnl': libnl_dep.found(),
|
2020-10-08 11:51:43 +00:00
|
|
|
'libparted': libparted_dep.found(),
|
2020-06-24 11:27:03 +00:00
|
|
|
'libpcap': libpcap_dep.found(),
|
2020-06-24 11:27:12 +00:00
|
|
|
'libssh': libssh_dep.found(),
|
2020-04-29 09:07:42 +00:00
|
|
|
'libssh2': libssh2_dep.found(),
|
2020-10-08 11:51:43 +00:00
|
|
|
'libutil': libutil_dep.found(),
|
2023-03-23 08:15:35 +00:00
|
|
|
'netcf': netcf_dep.found(),
|
2020-06-24 11:27:40 +00:00
|
|
|
'NLS': have_gnu_gettext_tools,
|
2020-08-03 15:17:02 +00:00
|
|
|
'numactl': numactl_dep.found(),
|
2020-04-29 08:18:37 +00:00
|
|
|
'openwsman': openwsman_dep.found(),
|
2020-10-08 11:51:43 +00:00
|
|
|
'parallels-sdk': parallels_sdk_dep.found(),
|
2020-04-30 09:35:51 +00:00
|
|
|
'pciaccess': pciaccess_dep.found(),
|
2020-04-29 09:32:41 +00:00
|
|
|
'polkit': conf.has('WITH_POLKIT'),
|
2020-04-29 09:37:40 +00:00
|
|
|
'rbd': rbd_dep.found(),
|
2020-07-29 12:20:43 +00:00
|
|
|
'readline': readline_dep.found(),
|
2023-03-23 08:15:35 +00:00
|
|
|
'sanlock': sanlock_dep.found(),
|
2020-07-29 12:21:14 +00:00
|
|
|
'sasl': sasl_dep.found(),
|
2020-07-29 12:21:29 +00:00
|
|
|
'selinux': selinux_dep.found(),
|
2020-07-29 12:21:43 +00:00
|
|
|
'udev': udev_dep.found(),
|
2020-06-24 01:17:42 +00:00
|
|
|
'xdr': xdr_dep.found(),
|
2020-03-02 14:14:14 +00:00
|
|
|
}
|
|
|
|
summary(libs_summary, section: 'Libraries', bool_yn: true)
|
|
|
|
|
2020-06-16 20:54:17 +00:00
|
|
|
win_summary = {
|
|
|
|
'MinGW': host_machine.system() == 'windows',
|
2020-07-01 01:08:06 +00:00
|
|
|
'windres': host_machine.system() == 'windows',
|
2020-06-16 20:54:17 +00:00
|
|
|
}
|
|
|
|
summary(win_summary, section: 'Windows', bool_yn: true)
|
|
|
|
|
2020-04-30 12:56:21 +00:00
|
|
|
test_summary = {
|
2020-09-22 13:15:49 +00:00
|
|
|
'Expensive': use_expensive_tests,
|
2020-04-30 12:56:21 +00:00
|
|
|
'Coverage': coverage_flags.length() > 0,
|
|
|
|
}
|
|
|
|
summary(test_summary, section: 'Test suite', bool_yn: true)
|
2020-07-24 14:35:03 +00:00
|
|
|
|
2020-07-24 14:45:58 +00:00
|
|
|
if conf.has('DEFAULT_LOADER_NVRAM')
|
|
|
|
loader_res = '@0@ !!! Using this configure option is strongly discouraged !!!'.format(conf.get_unquoted('DEFAULT_LOADER_NVRAM'))
|
|
|
|
else
|
|
|
|
loader_res = ''
|
|
|
|
endif
|
2020-07-24 14:35:03 +00:00
|
|
|
misc_summary = {
|
2024-09-24 07:32:22 +00:00
|
|
|
'Char device locks': chrdev_lock_files,
|
2020-10-08 12:39:38 +00:00
|
|
|
'docs': gen_docs,
|
2020-07-24 14:44:59 +00:00
|
|
|
'DTrace': conf.has('WITH_DTRACE_PROBES'),
|
2020-10-08 12:10:46 +00:00
|
|
|
'firewalld': conf.has('WITH_FIREWALLD'),
|
|
|
|
'firewalld-zone': conf.has('WITH_FIREWALLD_ZONE'),
|
2020-07-16 15:36:03 +00:00
|
|
|
'Init script': init_script,
|
2020-07-24 14:45:58 +00:00
|
|
|
'Loader/NVRAM': loader_res,
|
2024-09-24 07:32:22 +00:00
|
|
|
'nbdkit': conf.has('WITH_NBDKIT'),
|
|
|
|
'nss': conf.has('WITH_NSS'),
|
|
|
|
'numad': conf.has('WITH_NUMAD'),
|
2020-10-08 12:10:46 +00:00
|
|
|
'pm_utils': conf.has('WITH_PM_UTILS'),
|
2024-04-16 14:32:26 +00:00
|
|
|
'SSH proxy': conf.has('WITH_SSH_PROXY'),
|
2024-02-09 14:20:58 +00:00
|
|
|
'sysctl config': conf.has('WITH_SYSCTL'),
|
2024-09-24 07:32:22 +00:00
|
|
|
'tests': tests_enabled,
|
|
|
|
'TLS priority': conf.get_unquoted('TLS_PRIORITY'),
|
2024-02-09 14:20:58 +00:00
|
|
|
'userfaultfd sysctl': conf.has('WITH_USERFAULTFD_SYSCTL'),
|
2024-09-24 07:32:22 +00:00
|
|
|
'virt-host-validate': conf.has('WITH_HOST_VALIDATE'),
|
|
|
|
'virt-login-shell': conf.has('WITH_LOGIN_SHELL'),
|
|
|
|
'Warning Flags': supported_cc_flags,
|
2020-07-24 14:35:03 +00:00
|
|
|
}
|
2024-05-28 13:16:13 +00:00
|
|
|
if conf.has('WITH_NETWORK')
|
|
|
|
misc_summary += {
|
|
|
|
'firewall backends': firewall_backend_priority,
|
|
|
|
}
|
|
|
|
endif
|
2020-07-24 14:35:03 +00:00
|
|
|
summary(misc_summary, section: 'Miscellaneous', bool_yn: true, list_sep: ' ')
|
2020-07-29 12:22:10 +00:00
|
|
|
|
|
|
|
devtools_summary = {
|
|
|
|
'wireshark_dissector': wireshark_dep.found(),
|
|
|
|
}
|
|
|
|
summary(devtools_summary, section: 'Developer Tools', bool_yn: true)
|
2020-07-01 01:07:00 +00:00
|
|
|
|
2024-02-19 09:23:35 +00:00
|
|
|
if missing_optional_programs.length() > 0
|
|
|
|
missing_list = ' '.join(missing_optional_programs)
|
|
|
|
missing_warn = ' (some tests will be skipped!)'
|
|
|
|
test_programs_summary = {
|
|
|
|
'Missing': missing_list + missing_warn,
|
|
|
|
}
|
|
|
|
summary(test_programs_summary, section: 'Optional programs', bool_yn: true)
|
|
|
|
endif
|
|
|
|
|
2020-07-01 01:07:00 +00:00
|
|
|
if conf.has('WITH_QEMU')
|
|
|
|
qemu_warn = ''
|
|
|
|
if qemu_user == 'root'
|
|
|
|
qemu_warn = ' !!! running QEMU as root is strongly discouraged !!!'
|
|
|
|
endif
|
|
|
|
priv_summary = {
|
|
|
|
'QEMU': '@0@:@1@@2@'.format(qemu_user, qemu_group, qemu_warn),
|
|
|
|
}
|
|
|
|
summary(priv_summary, section: 'Privileges')
|
|
|
|
endif
|