#!/usr/bin/python -u
#
# This is the API builder, it parses the C sources and builds the
# API formal description in XML.
#
# See Copyright for the status of this software.
#
# daniel@veillard.com
#

import os, sys
import string
import glob
import re

quiet = True
warnings = 0
debug = False
debugsym = None

#
# C parser analysis code
#
included_files = {
    "libvirt.h": "header with general libvirt API definitions",
    "virterror.h": "header with error specific API definitions",
    "libvirt.c": "Main interfaces for the libvirt library",
    "virerror.c": "implements error handling and reporting code for libvirt",
    "virevent.c": "event loop for monitoring file handles",
    "virtypedparam.c": "virTypedParameters APIs",
}

qemu_included_files = {
    "libvirt-qemu.h": "header with QEMU specific API definitions",
    "libvirt-qemu.c": "Implementations for the QEMU specific APIs",
}
lxc_included_files = {
    "libvirt-lxc.h": "header with LXC specific API definitions",
    "libvirt-lxc.c": "Implementations for the LXC specific APIs",
}

ignored_words = {
    "ATTRIBUTE_UNUSED": (0, "macro keyword"),
    "ATTRIBUTE_SENTINEL": (0, "macro keyword"),
    "VIR_DEPRECATED": (0, "macro keyword"),
    "VIR_EXPORT_VAR": (0, "macro keyword"),
    "WINAPI": (0, "Windows keyword"),
    "__declspec": (3, "Windows keyword"),
    "__stdcall": (0, "Windows keyword"),
}

ignored_functions = {
    "virConnectSupportsFeature": "private function for remote access",
    "virDomainMigrateFinish": "private function for migration",
    "virDomainMigrateFinish2": "private function for migration",
    "virDomainMigratePerform": "private function for migration",
    "virDomainMigratePrepare": "private function for migration",
    "virDomainMigratePrepare2": "private function for migration",
    "virDomainMigratePrepareTunnel": "private function for tunnelled migration",
    "virDomainMigrateBegin3": "private function for migration",
    "virDomainMigrateFinish3": "private function for migration",
    "virDomainMigratePerform3": "private function for migration",
    "virDomainMigratePrepare3": "private function for migration",
    "virDomainMigrateConfirm3": "private function for migration",
    "virDomainMigratePrepareTunnel3": "private function for tunnelled migration",
    "DllMain": "specific function for Win32",
    "virTypedParamsValidate": "internal function in virtypedparam.c",
    "virTypedParameterAssign": "internal function in virtypedparam.c",
    "virTypedParameterAssignFromStr": "internal function in virtypedparam.c",
    "virTypedParameterToString": "internal function in virtypedparam.c",
    "virTypedParamsCheck": "internal function in virtypedparam.c",
    "virTypedParamsCopy": "internal function in virtypedparam.c",
    "virDomainMigrateBegin3Params": "private function for migration",
    "virDomainMigrateFinish3Params": "private function for migration",
    "virDomainMigratePerform3Params": "private function for migration",
    "virDomainMigratePrepare3Params": "private function for migration",
    "virDomainMigrateConfirm3Params": "private function for migration",
    "virDomainMigratePrepareTunnel3Params": "private function for tunnelled migration",
}

ignored_macros = {
    "_virSchedParameter": "backward compatibility macro for virTypedParameter",
    "_virBlkioParameter": "backward compatibility macro for virTypedParameter",
    "_virMemoryParameter": "backward compatibility macro for virTypedParameter",
}
def escape(raw):
    raw = string.replace(raw, '&', '&amp;')
    raw = string.replace(raw, '<', '&lt;')
    raw = string.replace(raw, '>', '&gt;')
    raw = string.replace(raw, "'", '&apos;')
    raw = string.replace(raw, '"', '&quot;')
    return raw


def uniq(items):
    d = {}
    for item in items:
        d[item] = 1
    k = d.keys()
    k.sort()
    return k
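
# For example, escape() makes raw C text safe to embed in the generated XML:
#
#   escape('int a < b & "c"')  ->  'int a &lt; b &amp; &quot;c&quot;'
#
# and uniq() collapses duplicates into a sorted list:
#
#   uniq(["b", "a", "b"])  ->  ["a", "b"]
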
class identifier:
    def __init__(self, name, header=None, module=None, type=None, lineno = 0,
                 info=None, extra=None, conditionals = None):
        self.name = name
        self.header = header
        self.module = module
        self.type = type
        self.info = info
        self.extra = extra
        self.lineno = lineno
        self.static = 0
        if conditionals is None or len(conditionals) == 0:
            self.conditionals = None
        else:
            self.conditionals = conditionals[:]
        if self.name == debugsym and not quiet:
            print "=> define %s : %s" % (debugsym, (module, type, info,
                                                    extra, conditionals))

    def __repr__(self):
        r = "%s %s:" % (self.type, self.name)
        if self.static:
            r = r + " static"
        if self.module is not None:
            r = r + " from %s" % (self.module)
        if self.info is not None:
            r = r + " " + `self.info`
        if self.extra is not None:
            r = r + " " + `self.extra`
        if self.conditionals is not None:
            r = r + " " + `self.conditionals`
        return r

    def set_header(self, header):
        self.header = header

    def set_module(self, module):
        self.module = module

    def set_type(self, type):
        self.type = type

    def set_info(self, info):
        self.info = info

    def set_extra(self, extra):
        self.extra = extra

    def set_lineno(self, lineno):
        self.lineno = lineno

    def set_static(self, static):
        self.static = static

    def set_conditionals(self, conditionals):
        if conditionals is None or len(conditionals) == 0:
            self.conditionals = None
        else:
            self.conditionals = conditionals[:]

    def get_name(self):
        return self.name

    def get_header(self):
        return self.header

    def get_module(self):
        return self.module

    def get_type(self):
        return self.type

    def get_info(self):
        return self.info

    def get_lineno(self):
        return self.lineno

    def get_extra(self):
        return self.extra

    def get_static(self):
        return self.static

    def get_conditionals(self):
        return self.conditionals

    def update(self, header, module, type = None, info = None, extra=None,
               conditionals=None):
        if self.name == debugsym and not quiet:
            print "=> update %s : %s" % (debugsym, (module, type, info,
                                                    extra, conditionals))
        if header is not None and self.header is None:
            self.set_header(module)
        if module is not None and (self.module is None or self.header == self.module):
            self.set_module(module)
        if type is not None and self.type is None:
            self.set_type(type)
        if info is not None:
            self.set_info(info)
        if extra is not None:
            self.set_extra(extra)
        if conditionals is not None:
            self.set_conditionals(conditionals)
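
# A small, hypothetical illustration of the lifecycle (names are made up):
# an identifier is created when a declaration is first met, then update()
# refines it as more information is discovered:
#
#   d = identifier("virFoo", header="libvirt.h", type="function")
#   d.update(None, "libvirt.c")   # implementation found in libvirt.c
#   repr(d)  ->  "function virFoo: from libvirt.c"
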
class index:
    def __init__(self, name = "noname"):
        self.name = name
        self.identifiers = {}
        self.functions = {}
        self.variables = {}
        self.includes = {}
        self.structs = {}
        self.unions = {}
        self.enums = {}
        self.typedefs = {}
        self.macros = {}
        self.references = {}
        self.info = {}

    def warning(self, msg):
        global warnings
        warnings = warnings + 1
        print msg

    def add_ref(self, name, header, module, static, type, lineno, info=None, extra=None, conditionals = None):
        if name[0:2] == '__':
            return None
        d = None
        try:
            d = self.identifiers[name]
            d.update(header, module, type, info, extra, conditionals)
        except:
            d = identifier(name, header, module, type, lineno, info, extra, conditionals)
            self.identifiers[name] = d

        if d is not None and static == 1:
            d.set_static(1)

        if d is not None and name is not None and type is not None:
            self.references[name] = d

        if name == debugsym and not quiet:
            print "New ref: %s" % (d)

        return d

    def add(self, name, header, module, static, type, lineno, info=None, extra=None, conditionals = None):
        if name[0:2] == '__':
            return None
        d = None
        try:
            d = self.identifiers[name]
            d.update(header, module, type, info, extra, conditionals)
        except:
            d = identifier(name, header, module, type, lineno, info, extra, conditionals)
            self.identifiers[name] = d

        if d is not None and static == 1:
            d.set_static(1)

        if d is not None and name is not None and type is not None:
            if type == "function":
                self.functions[name] = d
            elif type == "functype":
                self.functions[name] = d
            elif type == "variable":
                self.variables[name] = d
            elif type == "include":
                self.includes[name] = d
            elif type == "struct":
                self.structs[name] = d
            elif type == "union":
                self.unions[name] = d
            elif type == "enum":
                self.enums[name] = d
            elif type == "typedef":
                self.typedefs[name] = d
            elif type == "macro":
                self.macros[name] = d
            else:
                self.warning("Unable to register type %s" % (type))

        if name == debugsym and not quiet:
            print "New symbol: %s" % (d)

        return d

    def merge(self, idx):
        for id in idx.functions.keys():
            #
            # macro might be used to override functions or variables
            # definitions
            #
            if self.macros.has_key(id):
                del self.macros[id]
            if self.functions.has_key(id):
                self.warning("function %s from %s redeclared in %s" % (
                    id, self.functions[id].header, idx.functions[id].header))
            else:
                self.functions[id] = idx.functions[id]
                self.identifiers[id] = idx.functions[id]
        for id in idx.variables.keys():
            #
            # macro might be used to override functions or variables
            # definitions
            #
            if self.macros.has_key(id):
                del self.macros[id]
            if self.variables.has_key(id):
                self.warning("variable %s from %s redeclared in %s" % (
                    id, self.variables[id].header, idx.variables[id].header))
            else:
                self.variables[id] = idx.variables[id]
                self.identifiers[id] = idx.variables[id]
        for id in idx.structs.keys():
            if self.structs.has_key(id):
                self.warning("struct %s from %s redeclared in %s" % (
                    id, self.structs[id].header, idx.structs[id].header))
            else:
                self.structs[id] = idx.structs[id]
                self.identifiers[id] = idx.structs[id]
        for id in idx.unions.keys():
            if self.unions.has_key(id):
                self.warning("union %s from %s redeclared in %s" % (
                    id, self.unions[id].header, idx.unions[id].header))
            else:
                self.unions[id] = idx.unions[id]
                self.identifiers[id] = idx.unions[id]
        for id in idx.typedefs.keys():
            if self.typedefs.has_key(id):
                self.warning("typedef %s from %s redeclared in %s" % (
                    id, self.typedefs[id].header, idx.typedefs[id].header))
            else:
                self.typedefs[id] = idx.typedefs[id]
                self.identifiers[id] = idx.typedefs[id]
        for id in idx.macros.keys():
            #
            # macro might be used to override functions or variables
            # definitions
            #
            if self.variables.has_key(id):
                continue
            if self.functions.has_key(id):
                continue
            if self.enums.has_key(id):
                continue
            if self.macros.has_key(id):
                self.warning("macro %s from %s redeclared in %s" % (
                    id, self.macros[id].header, idx.macros[id].header))
            else:
                self.macros[id] = idx.macros[id]
                self.identifiers[id] = idx.macros[id]
        for id in idx.enums.keys():
            if self.enums.has_key(id):
                self.warning("enum %s from %s redeclared in %s" % (
                    id, self.enums[id].header, idx.enums[id].header))
            else:
                self.enums[id] = idx.enums[id]
                self.identifiers[id] = idx.enums[id]

    def merge_public(self, idx):
        for id in idx.functions.keys():
            if self.functions.has_key(id):
                # check that function condition agrees with header
                if idx.functions[id].conditionals != \
                   self.functions[id].conditionals:
                    self.warning("Header condition differs from Function for %s:" \
                                 % id)
                    self.warning(" H: %s" % self.functions[id].conditionals)
                    self.warning(" C: %s" % idx.functions[id].conditionals)
                up = idx.functions[id]
                self.functions[id].update(None, up.module, up.type, up.info, up.extra)
            # else:
            #     print "Function %s from %s is not declared in headers" % (
            #           id, idx.functions[id].module)
        # TODO: do the same for variables.

    def analyze_dict(self, type, dict):
        count = 0
        public = 0
        for name in dict.keys():
            id = dict[name]
            count = count + 1
            if id.static == 0:
                public = public + 1
        if count != public:
            print " %d %s , %d public" % (count, type, public)
        elif count != 0:
            print " %d public %s" % (count, type)

    def analyze(self):
        if not quiet:
            self.analyze_dict("functions", self.functions)
            self.analyze_dict("variables", self.variables)
            self.analyze_dict("structs", self.structs)
            self.analyze_dict("unions", self.unions)
            self.analyze_dict("typedefs", self.typedefs)
            self.analyze_dict("macros", self.macros)
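
# A minimal sketch of how two indexes combine (all names hypothetical):
#
#   master = index("master")
#   other = index("module")
#   other.add("virFoo", "libvirt.h", "libvirt.c", 0, "function", 42)
#   master.merge(other)    # virFoo is copied into master.functions
#   master.merge(other)    # warns: "function virFoo ... redeclared in ..."
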
class CLexer:
    """A lexer for the C language, tokenizing the input by reading and
       analyzing it line by line"""
    def __init__(self, input):
        self.input = input
        self.tokens = []
        self.line = ""
        self.lineno = 0

    def getline(self):
        line = ''
        while line == '':
            line = self.input.readline()
            if not line:
                return None
            self.lineno = self.lineno + 1
            line = string.lstrip(line)
            line = string.rstrip(line)
            if line == '':
                continue
            # join continuation lines ending with a backslash
            while line[-1] == '\\':
                line = line[:-1]
                n = self.input.readline()
                self.lineno = self.lineno + 1
                n = string.lstrip(n)
                n = string.rstrip(n)
                if not n:
                    break
                else:
                    line = line + n
        return line

    def getlineno(self):
        return self.lineno

    def push(self, token):
        self.tokens.insert(0, token)

    def debug(self):
        print "Last token: ", self.last
        print "Token queue: ", self.tokens
        print "Line %d end: " % (self.lineno), self.line

    def token(self):
        while self.tokens == []:
            if self.line == "":
                line = self.getline()
            else:
                line = self.line
                self.line = ""
            if line is None:
                return None

            if line[0] == '#':
                self.tokens = map((lambda x: ('preproc', x)),
                                  string.split(line))
                break
            l = len(line)
            # string and character constants
            if line[0] == '"' or line[0] == "'":
                end = line[0]
                line = line[1:]
                found = 0
                tok = ""
                while found == 0:
                    i = 0
                    l = len(line)
                    while i < l:
                        if line[i] == end:
                            self.line = line[i+1:]
                            line = line[:i]
                            l = i
                            found = 1
                            break
                        if line[i] == '\\':
                            i = i + 1
                        i = i + 1
                    tok = tok + line
                    if found == 0:
                        line = self.getline()
                        if line is None:
                            return None
                self.last = ('string', tok)
                return self.last

            # multi-line C comments
            if l >= 2 and line[0] == '/' and line[1] == '*':
                line = line[2:]
                found = 0
                tok = ""
                while found == 0:
                    i = 0
                    l = len(line)
                    while i < l:
                        if line[i] == '*' and i+1 < l and line[i+1] == '/':
                            self.line = line[i+2:]
                            line = line[:i-1]
                            l = i
                            found = 1
                            break
                        i = i + 1
                    if tok != "":
                        tok = tok + "\n"
                    tok = tok + line
                    if found == 0:
                        line = self.getline()
                        if line is None:
                            return None
                self.last = ('comment', tok)
                return self.last
            # C++ style comments
            if l >= 2 and line[0] == '/' and line[1] == '/':
                line = line[2:]
                self.last = ('comment', line)
                return self.last
            # truncate the line at any comment or string start
            i = 0
            while i < l:
                if line[i] == '/' and i+1 < l and line[i+1] == '/':
                    self.line = line[i:]
                    line = line[:i]
                    break
                if line[i] == '/' and i+1 < l and line[i+1] == '*':
                    self.line = line[i:]
                    line = line[:i]
                    break
                if line[i] == '"' or line[i] == "'":
                    self.line = line[i:]
                    line = line[:i]
                    break
                i = i + 1
            l = len(line)
            i = 0
            while i < l:
                if line[i] == ' ' or line[i] == '\t':
                    i = i + 1
                    continue
                o = ord(line[i])
                if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
                   (o >= 48 and o <= 57):
                    s = i
                    while i < l:
                        o = ord(line[i])
                        if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
                           (o >= 48 and o <= 57) or string.find(
                               " \t(){}:;,+-*/%&!|[]=><", line[i]) == -1:
                            i = i + 1
                        else:
                            break
                    self.tokens.append(('name', line[s:i]))
                    continue
                # single-character separators: ( ) { } : ; , [ ]
                if string.find("(){}:;,[]", line[i]) != -1:
                    self.tokens.append(('sep', line[i]))
                    i = i + 1
                    continue
                # operator characters: + - * > < = / % & ! | .
                if string.find("+-*><=/%&!|.", line[i]) != -1:
                    if line[i] == '.' and i + 2 < l and \
                       line[i+1] == '.' and line[i+2] == '.':
                        self.tokens.append(('name', '...'))
                        i = i + 3
                        continue

                    j = i + 1
                    if j < l and (
                            string.find("+-*><=/%&!|", line[j]) != -1):
                        self.tokens.append(('op', line[i:j+1]))
                        i = j + 1
                    else:
                        self.tokens.append(('op', line[i]))
                        i = i + 1
                    continue
                # anything else not a whitespace, separator or operator
                # character starts a name token
                s = i
                while i < l:
                    o = ord(line[i])
                    if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
                       (o >= 48 and o <= 57) or (
                            string.find(" \t(){}:;,+-*/%&!|[]=><", line[i]) == -1):
                        i = i + 1
                    else:
                        break
                self.tokens.append(('name', line[s:i]))

        tok = self.tokens[0]
        self.tokens = self.tokens[1:]
        self.last = tok
        return tok
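
# For instance, lexing the (hypothetical) C line
#
#   int virFoo(void);  /* do foo */
#
# yields the token stream
#
#   ('name', 'int'), ('name', 'virFoo'), ('sep', '('), ('name', 'void'),
#   ('sep', ')'), ('sep', ';'), ('comment', ' do foo')
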
class CParser:
    """The C module parser"""
    def __init__(self, filename, idx = None):
        self.filename = filename
        if len(filename) > 2 and filename[-2:] == '.h':
            self.is_header = 1
        else:
            self.is_header = 0
        self.input = open(filename)
        self.lexer = CLexer(self.input)
        if idx is None:
            self.index = index()
        else:
            self.index = idx
        self.top_comment = ""
        self.last_comment = ""
        self.comment = None
        self.collect_ref = 0
        self.no_error = 0
        self.conditionals = []
        self.defines = []

    def collect_references(self):
        self.collect_ref = 1

    def stop_error(self):
        self.no_error = 1

    def start_error(self):
        self.no_error = 0

    def lineno(self):
        return self.lexer.getlineno()

    def index_add(self, name, module, static, type, info=None, extra = None):
        if self.is_header == 1:
            self.index.add(name, module, module, static, type, self.lineno(),
                           info, extra, self.conditionals)
        else:
            self.index.add(name, None, module, static, type, self.lineno(),
                           info, extra, self.conditionals)

    def index_add_ref(self, name, module, static, type, info=None,
                      extra = None):
        if self.is_header == 1:
            self.index.add_ref(name, module, module, static, type,
                               self.lineno(), info, extra, self.conditionals)
        else:
            self.index.add_ref(name, None, module, static, type, self.lineno(),
                               info, extra, self.conditionals)

    def warning(self, msg):
        global warnings
        warnings = warnings + 1
        if self.no_error:
            return
        print msg

    def error(self, msg, token=-1):
        if self.no_error:
            return

        print "Parse Error: " + msg
        if token != -1:
            print "Got token ", token
        self.lexer.debug()
        sys.exit(1)

    def debug(self, msg, token=-1):
        print "Debug: " + msg
        if token != -1:
            print "Got token ", token
        self.lexer.debug()

    def parseTopComment(self, comment):
        res = {}
        lines = string.split(comment, "\n")
        item = None
        for line in lines:
            line = line.lstrip().lstrip('*').lstrip()

            m = re.match('([_.a-zA-Z0-9]+):(.*)', line)
            if m:
                item = m.group(1)
                line = m.group(2).lstrip()

            if item:
                if res.has_key(item):
                    res[item] = res[item] + " " + line
                else:
                    res[item] = line
        self.index.info = res

    def strip_lead_star(self, line):
        l = len(line)
        i = 0
        while i < l:
            if line[i] == ' ' or line[i] == '\t':
                i += 1
            elif line[i] == '*':
                return line[:i] + line[i + 1:]
            else:
                return line
        return line

    def cleanupComment(self):
        if type(self.comment) != type(""):
            return
        # remove the leading * on multi-line comments
        lines = self.comment.splitlines(True)
        com = ""
        for line in lines:
            com = com + self.strip_lead_star(line)
        self.comment = com.strip()
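
    # parseTopComment() turns a header's leading comment into a metadata
    # dict; e.g. a (hypothetical) top comment such as
    #
    #   * libvirt.h:
    #   * Summary: core interfaces for the libvirt library
    #   * Description: provides the interfaces of the libvirt library
    #
    # ends up in self.index.info as
    #
    #   {'libvirt.h': '', 'Summary': 'core interfaces for the libvirt library',
    #    'Description': 'provides the interfaces of the libvirt library'}
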
    def parseComment(self, token):
        com = token[1]
        if self.top_comment == "":
            self.top_comment = com
        if self.comment is None or com[0] == '*':
            self.comment = com
        else:
            self.comment = self.comment + com
        token = self.lexer.token()

        if string.find(self.comment, "DOC_DISABLE") != -1:
            self.stop_error()

        if string.find(self.comment, "DOC_ENABLE") != -1:
            self.start_error()

        return token

    #
    # Parse a comment block associated with a typedef
    #
    def parseTypeComment(self, name, quiet = 0):
        if name[0:2] == '__':
            quiet = 1

        desc = ""

        if self.comment is None:
            if not quiet:
                self.warning("Missing comment for type %s" % (name))
            return(desc)
        if self.comment[0] != '*':
            if not quiet:
                self.warning("Missing * in type comment for %s" % (name))
            return(desc)
        lines = string.split(self.comment, '\n')
        if lines[0] == '*':
            del lines[0]
        if lines[0] != "* %s:" % (name):
            if not quiet:
                self.warning("Misformatted type comment for %s" % (name))
                self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
            return(desc)
        del lines[0]
        while len(lines) > 0 and lines[0] == '*':
            del lines[0]
        desc = ""
        while len(lines) > 0:
            l = lines[0]
            while len(l) > 0 and l[0] == '*':
                l = l[1:]
            l = string.strip(l)
            desc = desc + " " + l
            del lines[0]

        desc = string.strip(desc)

        if quiet == 0:
            if desc == "":
                self.warning("Type comment for %s lacks a description of the type" % (name))

        return(desc)
    #
    # Parse a comment block associated with a macro
    #
    def parseMacroComment(self, name, quiet = 0):
        global ignored_macros

        if name[0:2] == '__':
            quiet = 1
        if ignored_macros.has_key(name):
            quiet = 1

        args = []
        desc = ""

        if self.comment is None:
            if not quiet:
                self.warning("Missing comment for macro %s" % (name))
            return((args, desc))
        if self.comment[0] != '*':
            if not quiet:
                self.warning("Missing * in macro comment for %s" % (name))
            return((args, desc))
        lines = string.split(self.comment, '\n')
        if lines[0] == '*':
            del lines[0]
        if lines[0] != "* %s:" % (name):
            if not quiet:
                self.warning("Misformatted macro comment for %s" % (name))
                self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
            return((args, desc))
        del lines[0]
        while lines[0] == '*':
            del lines[0]
        while len(lines) > 0 and lines[0][0:3] == '* @':
            l = lines[0][3:]
            try:
                (arg, desc) = string.split(l, ':', 1)
                desc = string.strip(desc)
                arg = string.strip(arg)
            except:
                if not quiet:
                    self.warning("Misformatted macro comment for %s" % (name))
                    self.warning(" problem with '%s'" % (lines[0]))
                del lines[0]
                continue
            del lines[0]
            l = string.strip(lines[0])
            while len(l) > 2 and l[0:3] != '* @':
                while l[0] == '*':
                    l = l[1:]
                desc = desc + ' ' + string.strip(l)
                del lines[0]
                if len(lines) == 0:
                    break
                l = lines[0]
            args.append((arg, desc))
        while len(lines) > 0 and lines[0] == '*':
            del lines[0]
        desc = ""
        while len(lines) > 0:
            l = lines[0]
            while len(l) > 0 and l[0] == '*':
                l = l[1:]
            l = string.strip(l)
            desc = desc + " " + l
            del lines[0]

        desc = string.strip(desc)

        if quiet == 0:
            if desc == "":
                self.warning("Macro comment for %s lacks description of the macro" % (name))

        return((args, desc))
    #
    # Parse a comment block and merge the information found in the
    # parameters descriptions, finally returns a block as complete
    # as possible
    #
    def mergeFunctionComment(self, name, description, quiet = 0):
        global ignored_functions

        if name == 'main':
            quiet = 1
        if name[0:2] == '__':
            quiet = 1
        if ignored_functions.has_key(name):
            quiet = 1

        (ret, args) = description
        desc = ""
        retdesc = ""

        if self.comment is None:
            if not quiet:
                self.warning("Missing comment for function %s" % (name))
            return(((ret[0], retdesc), args, desc))
        if self.comment[0] != '*':
            if not quiet:
                self.warning("Missing * in function comment for %s" % (name))
            return(((ret[0], retdesc), args, desc))
        lines = string.split(self.comment, '\n')
        if lines[0] == '*':
            del lines[0]
        if lines[0] != "* %s:" % (name):
            if not quiet:
                self.warning("Misformatted function comment for %s" % (name))
                self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
            return(((ret[0], retdesc), args, desc))
        del lines[0]
        while lines[0] == '*':
            del lines[0]
        nbargs = len(args)
        while len(lines) > 0 and lines[0][0:3] == '* @':
            l = lines[0][3:]
            try:
                (arg, desc) = string.split(l, ':', 1)
                desc = string.strip(desc)
                arg = string.strip(arg)
            except:
                if not quiet:
                    self.warning("Misformatted function comment for %s" % (name))
                    self.warning(" problem with '%s'" % (lines[0]))
                del lines[0]
                continue
            del lines[0]
            l = string.strip(lines[0])
            while len(l) > 2 and l[0:3] != '* @':
                while l[0] == '*':
                    l = l[1:]
                desc = desc + ' ' + string.strip(l)
                del lines[0]
                if len(lines) == 0:
                    break
                l = lines[0]
            i = 0
            while i < nbargs:
                if args[i][1] == arg:
                    args[i] = (args[i][0], arg, desc)
                    break
                i = i + 1
            if i >= nbargs:
                if not quiet:
                    self.warning("Unable to find arg %s from function comment for %s" % (
                        arg, name))
        while len(lines) > 0 and lines[0] == '*':
            del lines[0]
        desc = None
        while len(lines) > 0:
            l = lines[0]
            i = 0
            # Remove all leading '*', followed by at most one ' ' character
            # since we need to preserve correct indentation of code examples
            while i < len(l) and l[i] == '*':
                i = i + 1
            if i > 0:
                if i < len(l) and l[i] == ' ':
                    i = i + 1
                l = l[i:]
            if len(l) >= 6 and (l[0:7] == "returns" or l[0:7] == "Returns"):
                try:
                    l = string.split(l, ' ', 1)[1]
                except:
                    l = ""
                retdesc = string.strip(l)
                del lines[0]
                while len(lines) > 0:
                    l = lines[0]
                    while len(l) > 0 and l[0] == '*':
                        l = l[1:]
                    l = string.strip(l)
                    retdesc = retdesc + " " + l
                    del lines[0]
            else:
                if desc is not None:
                    desc = desc + "\n" + l
                else:
                    desc = l
                del lines[0]

        if desc is None:
            desc = ""
        retdesc = string.strip(retdesc)
        desc = string.strip(desc)

        if quiet == 0:
            #
            # report missing comments
            #
            i = 0
            while i < nbargs:
                if args[i][2] is None and args[i][0] != "void" and args[i][1] is not None:
                    self.warning("Function comment for %s lacks description of arg %s" % (name, args[i][1]))
                i = i + 1
            if retdesc == "" and ret[0] != "void":
                self.warning("Function comment for %s lacks description of return value" % (name))
            if desc == "":
                self.warning("Function comment for %s lacks description of the function" % (name))

        return(((ret[0], retdesc), args, desc))
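
    # The docstring format consumed here is the usual libvirt comment style;
    # e.g. for a hypothetical int-returning API:
    #
    #   /**
    #    * virFoo:
    #    * @conn: pointer to the hypervisor connection
    #    *
    #    * Does foo on @conn.
    #    *
    #    * Returns 0 in case of success, -1 in case of error.
    #    */
    #
    # mergeFunctionComment() would fill in the @conn argument description,
    # the function description and the return value description from it.
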
    def parsePreproc(self, token):
        if debug:
            print "=> preproc ", token, self.lexer.tokens
        name = token[1]
        if name == "#include":
            token = self.lexer.token()
            if token is None:
                return None
            if token[0] == 'preproc':
                self.index_add(token[1], self.filename, not self.is_header,
                               "include")
                return self.lexer.token()
            return token
        if name == "#define":
            token = self.lexer.token()
            if token is None:
                return None
            if token[0] == 'preproc':
                # TODO macros with arguments
                name = token[1]
                lst = []
                token = self.lexer.token()
                while token is not None and token[0] == 'preproc' and \
                      token[1][0] != '#':
                    lst.append(token[1])
                    token = self.lexer.token()
                try:
                    name = string.split(name, '(')[0]
                except:
                    pass
                info = self.parseMacroComment(name, not self.is_header)
                self.index_add(name, self.filename, not self.is_header,
                               "macro", info)
                return token

        #
        # Processing of conditionals modified by Bill 1/1/05
        #
        # We process conditionals (i.e. tokens from #ifdef, #ifndef,
        # #if, #else and #endif) for headers and mainline code,
        # store the ones from the header in libxml2-api.xml, and later
        # (in the routine merge_public) verify that the two (header and
        # mainline code) agree.
        #
        # There is a small problem with processing the headers. Some of
        # the variables are not concerned with enabling / disabling of
        # library functions (e.g. '__XML_PARSER_H__'), and we don't want
        # them to be included in libxml2-api.xml, or involved in
        # the check between the header and the mainline code. To
        # accomplish this, we ignore any conditional which doesn't include
        # the string 'ENABLED'
        #
        if name == "#ifdef":
            apstr = self.lexer.tokens[0][1]
            try:
                self.defines.append(apstr)
                if string.find(apstr, 'ENABLED') != -1:
                    self.conditionals.append("defined(%s)" % apstr)
            except:
                pass
        elif name == "#ifndef":
            apstr = self.lexer.tokens[0][1]
            try:
                self.defines.append(apstr)
                if string.find(apstr, 'ENABLED') != -1:
                    self.conditionals.append("!defined(%s)" % apstr)
            except:
                pass
        elif name == "#if":
            apstr = ""
            for tok in self.lexer.tokens:
                if apstr != "":
                    apstr = apstr + " "
                apstr = apstr + tok[1]
            try:
                self.defines.append(apstr)
                if string.find(apstr, 'ENABLED') != -1:
                    self.conditionals.append(apstr)
            except:
                pass
        elif name == "#else":
            if self.conditionals != [] and \
               string.find(self.defines[-1], 'ENABLED') != -1:
                self.conditionals[-1] = "!(%s)" % self.conditionals[-1]
        elif name == "#endif":
            if self.conditionals != [] and \
               string.find(self.defines[-1], 'ENABLED') != -1:
                self.conditionals = self.conditionals[:-1]
            self.defines = self.defines[:-1]
        token = self.lexer.token()
        while token is not None and token[0] == 'preproc' and \
              token[1][0] != '#':
            token = self.lexer.token()
        return token
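
    # As explained above, only 'ENABLED' guards are tracked; for example
    # (hypothetical macro names):
    #
    #   #ifdef WITH_REMOTE_ENABLED   ->  conditionals: ["defined(WITH_REMOTE_ENABLED)"]
    #   #ifndef WITH_REMOTE_ENABLED  ->  conditionals: ["!defined(WITH_REMOTE_ENABLED)"]
    #   #ifdef __VIR_VIRLIB_H__      ->  recorded in self.defines only
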
    #
    # Token acquisition on top of the lexer, it handles the preprocessor
    # and comments internally since they are logically not part of
    # the program structure.
    #
    def push(self, tok):
        self.lexer.push(tok)

    def token(self):
        global ignored_words

        token = self.lexer.token()
        while token is not None:
            if token[0] == 'comment':
                token = self.parseComment(token)
                continue
            elif token[0] == 'preproc':
                token = self.parsePreproc(token)
                continue
            elif token[0] == "name" and token[1] == "__const":
                token = ("name", "const")
                return token
            elif token[0] == "name" and token[1] == "__attribute":
                token = self.lexer.token()
                while token is not None and token[1] != ";":
                    token = self.lexer.token()
                return token
            elif token[0] == "name" and ignored_words.has_key(token[1]):
                (n, info) = ignored_words[token[1]]
                i = 0
                while i < n:
                    token = self.lexer.token()
                    i = i + 1
                token = self.lexer.token()
                continue
            else:
                if debug:
                    print "=> ", token
                return token
        return None
    #
    # Parse a typedef; it records the type and its name.
    #
    def parseTypedef(self, token):
        if token is None:
            return None
        token = self.parseType(token)
        if token is None:
            self.error("parsing typedef")
            return None
        base_type = self.type
        type = base_type
        #self.debug("end typedef type", token)
        while token is not None:
            if token[0] == "name":
                name = token[1]
                signature = self.signature
                if signature is not None:
                    type = string.split(type, '(')[0]
                    d = self.mergeFunctionComment(name,
                                                  ((type, None), signature), 1)
                    self.index_add(name, self.filename, not self.is_header,
                                   "functype", d)
                else:
                    if base_type == "struct":
                        self.index_add(name, self.filename, not self.is_header,
                                       "struct", type)
                        base_type = "struct " + name
                    else:
                        # TODO report missing or misformatted comments
                        info = self.parseTypeComment(name, 1)
                        self.index_add(name, self.filename, not self.is_header,
                                       "typedef", type, info)
                token = self.token()
            else:
                self.error("parsing typedef: expecting a name")
                return token
            #self.debug("end typedef", token)
            if token is not None and token[0] == 'sep' and token[1] == ',':
                type = base_type
                token = self.token()
                while token is not None and token[0] == "op":
                    type = type + token[1]
                    token = self.token()
            elif token is not None and token[0] == 'sep' and token[1] == ';':
                break
            elif token is not None and token[0] == 'name':
                type = base_type
                continue
            else:
                self.error("parsing typedef: expecting ';'", token)
                return token
        token = self.token()
        return token
    #
    # Parse a C code block, used for functions; it parses till
    # the balancing } included
    #
    def parseBlock(self, token):
        while token is not None:
            if token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseBlock(token)
            elif token[0] == "sep" and token[1] == "}":
                self.comment = None
                token = self.token()
                return token
            else:
                if self.collect_ref == 1:
                    oldtok = token
                    token = self.token()
                    if oldtok[0] == "name" and oldtok[1][0:3] == "vir":
                        if token[0] == "sep" and token[1] == "(":
                            self.index_add_ref(oldtok[1], self.filename,
                                               0, "function")
                            token = self.token()
                        elif token[0] == "name":
                            token = self.token()
                            if token[0] == "sep" and (token[1] == ";" or
                               token[1] == "," or token[1] == "="):
                                self.index_add_ref(oldtok[1], self.filename,
                                                   0, "type")
                    elif oldtok[0] == "name" and oldtok[1][0:4] == "XEN_":
                        self.index_add_ref(oldtok[1], self.filename,
                                           0, "typedef")
                    elif oldtok[0] == "name" and oldtok[1][0:7] == "LIBXEN_":
                        self.index_add_ref(oldtok[1], self.filename,
                                           0, "typedef")

                else:
                    token = self.token()
        return token
    #
    # Parse a C struct definition till the balancing }
    #
    def parseStruct(self, token):
        fields = []
        #self.debug("start parseStruct", token)
        while token is not None:
            if token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseTypeBlock(token)
            elif token[0] == "sep" and token[1] == "}":
                self.struct_fields = fields
                #self.debug("end parseStruct", token)
                #print fields
                token = self.token()
                return token
            else:
                base_type = self.type
                #self.debug("before parseType", token)
                token = self.parseType(token)
                #self.debug("after parseType", token)
                if token is not None and token[0] == "name":
                    fname = token[1]
                    token = self.token()
                    if token[0] == "sep" and token[1] == ";":
                        self.comment = None
                        token = self.token()
                        self.cleanupComment()
                        if self.type == "union":
                            fields.append((self.type, fname, self.comment,
                                           self.union_fields))
                            self.union_fields = []
                        else:
                            fields.append((self.type, fname, self.comment))
                        self.comment = None
                    else:
                        self.error("parseStruct: expecting ;", token)
                elif token is not None and token[0] == "sep" and token[1] == "{":
                    token = self.token()
                    token = self.parseTypeBlock(token)
                    if token is not None and token[0] == "name":
                        token = self.token()
                    if token is not None and token[0] == "sep" and token[1] == ";":
                        token = self.token()
                    else:
                        self.error("parseStruct: expecting ;", token)
                else:
                    self.error("parseStruct: name", token)
                    token = self.token()
                self.type = base_type
        self.struct_fields = fields
        #self.debug("end parseStruct", token)
        #print fields
        return token
    #
    # Parse a C union definition till the balancing }
    #
    def parseUnion(self, token):
        fields = []
        # self.debug("start parseUnion", token)
        while token is not None:
            if token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseTypeBlock(token)
            elif token[0] == "sep" and token[1] == "}":
                self.union_fields = fields
                # self.debug("end parseUnion", token)
                # print fields
                token = self.token()
                return token
            else:
                base_type = self.type
                # self.debug("before parseType", token)
                token = self.parseType(token)
                # self.debug("after parseType", token)
                if token is not None and token[0] == "name":
                    fname = token[1]
                    token = self.token()
                    if token[0] == "sep" and token[1] == ";":
                        self.comment = None
                        token = self.token()
                        self.cleanupComment()
                        fields.append((self.type, fname, self.comment))
                        self.comment = None
                    else:
                        self.error("parseUnion: expecting ;", token)
                elif token is not None and token[0] == "sep" and token[1] == "{":
                    token = self.token()
                    token = self.parseTypeBlock(token)
                    if token is not None and token[0] == "name":
                        token = self.token()
                    if token is not None and token[0] == "sep" and token[1] == ";":
                        token = self.token()
                    else:
                        self.error("parseUnion: expecting ;", token)
                else:
                    self.error("parseUnion: name", token)
                    token = self.token()
                self.type = base_type
        self.union_fields = fields
        # self.debug("end parseUnion", token)
        # print fields
        return token
#
|
|
|
|
# Parse a C enum block, parse till the balancing }
|
|
|
|
#
|
|
|
|
def parseEnumBlock(self, token):
|
|
|
|
self.enums = []
|
2011-02-16 15:57:50 +00:00
|
|
|
name = None
|
|
|
|
self.comment = None
|
|
|
|
comment = ""
|
|
|
|
value = "0"
|
2013-08-22 09:16:03 +00:00
|
|
|
while token is not None:
|
2011-02-16 15:57:50 +00:00
|
|
|
if token[0] == "sep" and token[1] == "{":
|
|
|
|
token = self.token()
|
|
|
|
token = self.parseTypeBlock(token)
|
|
|
|
elif token[0] == "sep" and token[1] == "}":
|
2013-08-22 09:16:03 +00:00
|
|
|
if name is not None:
|
2011-06-20 03:25:34 +00:00
|
|
|
self.cleanupComment()
|
2013-08-22 09:16:03 +00:00
|
|
|
if self.comment is not None:
|
2011-02-16 15:57:50 +00:00
|
|
|
comment = self.comment
|
|
|
|
self.comment = None
|
|
|
|
self.enums.append((name, value, comment))
|
|
|
|
token = self.token()
|
|
|
|
return token
|
|
|
|
elif token[0] == "name":
|
2011-06-20 03:25:34 +00:00
|
|
|
self.cleanupComment()
|
2013-08-22 09:16:03 +00:00
|
|
|
if name is not None:
|
|
|
|
if self.comment is not None:
|
2011-02-16 15:57:50 +00:00
|
|
|
comment = string.strip(self.comment)
|
|
|
|
self.comment = None
|
|
|
|
self.enums.append((name, value, comment))
|
|
|
|
name = token[1]
|
|
|
|
comment = ""
|
|
|
|
token = self.token()
|
|
|
|
if token[0] == "op" and token[1][0] == "=":
|
|
|
|
value = ""
|
|
|
|
if len(token[1]) > 1:
|
|
|
|
value = token[1][1:]
|
|
|
|
token = self.token()
|
|
|
|
while token[0] != "sep" or (token[1] != ',' and
|
|
|
|
token[1] != '}'):
|
|
|
|
value = value + token[1]
|
|
|
|
token = self.token()
|
|
|
|
else:
|
|
|
|
try:
|
|
|
|
value = "%d" % (int(value) + 1)
|
|
|
|
except:
|
|
|
|
self.warning("Failed to compute value of enum %s" % (name))
|
|
|
|
value=""
|
|
|
|
if token[0] == "sep" and token[1] == ",":
|
|
|
|
token = self.token()
|
|
|
|
else:
|
|
|
|
token = self.token()
|
|
|
|
return token
|
2005-12-01 17:34:21 +00:00
|
|
|
|
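
    # Illustrative sketch (hypothetical input): for
    #     enum { VIR_A = 1, VIR_B, VIR_C = 10 }
    # values are kept as strings and auto-incremented when no '=' is
    # present, so self.enums ends up as
    #     [("VIR_A", "1", ""), ("VIR_B", "2", ""), ("VIR_C", "10", "")]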

    def parseVirEnumDecl(self, token):
        if token[0] != "name":
            self.error("parsing VIR_ENUM_DECL: expecting name", token)

        token = self.token()

        if token[0] != "sep":
            self.error("parsing VIR_ENUM_DECL: expecting ')'", token)

        if token[1] != ')':
            self.error("parsing VIR_ENUM_DECL: expecting ')'", token)

        token = self.token()
        if token[0] == "sep" and token[1] == ';':
            token = self.token()

        return token

    def parseVirEnumImpl(self, token):
        # First the type name
        if token[0] != "name":
            self.error("parsing VIR_ENUM_IMPL: expecting name", token)

        token = self.token()

        if token[0] != "sep":
            self.error("parsing VIR_ENUM_IMPL: expecting ','", token)

        if token[1] != ',':
            self.error("parsing VIR_ENUM_IMPL: expecting ','", token)
        token = self.token()

        # Now the sentinel name
        if token[0] != "name":
            self.error("parsing VIR_ENUM_IMPL: expecting name", token)

        token = self.token()

        if token[0] != "sep":
            self.error("parsing VIR_ENUM_IMPL: expecting ','", token)

        if token[1] != ',':
            self.error("parsing VIR_ENUM_IMPL: expecting ','", token)

        token = self.token()

        # Now a list of strings (optional comments)
        while token is not None:
            isGettext = False
            # First a string, optionally with N_(...)
            if token[0] == 'name':
                if token[1] != 'N_':
                    self.error("parsing VIR_ENUM_IMPL: expecting 'N_'", token)
                token = self.token()
                if token[0] != "sep" or token[1] != '(':
                    self.error("parsing VIR_ENUM_IMPL: expecting '('", token)
                token = self.token()
                isGettext = True

                if token[0] != "string":
                    self.error("parsing VIR_ENUM_IMPL: expecting a string", token)
                token = self.token()
            elif token[0] == "string":
                token = self.token()
            else:
                self.error("parsing VIR_ENUM_IMPL: expecting a string", token)

            # Then a separator
            if token[0] == "sep":
                if isGettext and token[1] == ')':
                    token = self.token()

                if token[1] == ',':
                    token = self.token()

                if token[1] == ')':
                    token = self.token()
                    break

            # Then an optional comment
            if token[0] == "comment":
                token = self.token()

        if token[0] == "sep" and token[1] == ';':
            token = self.token()

        return token
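
    # Illustrative sketch (hypothetical input): this helper only skips over
    # the macro arguments, so a declaration like
    #     VIR_ENUM_IMPL(virFoo, VIR_FOO_LAST, "none", N_("translated"));
    # is consumed token by token up to the closing ')' and optional ';'.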

    def parseVirLogInit(self, token):
        if token[0] != "string":
            self.error("parsing VIR_LOG_INIT: expecting string", token)

        token = self.token()

        if token[0] != "sep":
            self.error("parsing VIR_LOG_INIT: expecting ')'", token)

        if token[1] != ')':
            self.error("parsing VIR_LOG_INIT: expecting ')'", token)

        token = self.token()
        if token[0] == "sep" and token[1] == ';':
            token = self.token()

        return token

    #
    # Parse a C definition block, used for structs or unions; it parses
    # till the balancing }
    #
    def parseTypeBlock(self, token):
        while token is not None:
            if token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseTypeBlock(token)
            elif token[0] == "sep" and token[1] == "}":
                token = self.token()
                return token
            else:
                token = self.token()
        return token

    #
    # Parse a type: the fact that the type name can either occur after
    # the definition or within the definition makes it a little harder
    # if inside, the name token is pushed back before returning
    #
    def parseType(self, token):
        self.type = ""
        self.struct_fields = []
        self.union_fields = []
        self.signature = None
        if token is None:
            return token

        while token[0] == "name" and (
                token[1] == "const" or
                token[1] == "unsigned" or
                token[1] == "signed"):
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]
            token = self.token()

        if token[0] == "name" and token[1] == "long":
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]

            # some read ahead for long long
            oldtmp = token
            token = self.token()
            if token[0] == "name" and token[1] == "long":
                self.type = self.type + " " + token[1]
            else:
                self.push(token)
                token = oldtmp

            oldtmp = token
            token = self.token()
            if token[0] == "name" and token[1] == "int":
                self.type = self.type + " " + token[1]
            else:
                self.push(token)
                token = oldtmp

        elif token[0] == "name" and token[1] == "short":
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]

        elif token[0] == "name" and token[1] == "struct":
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]
            token = self.token()
            nametok = None
            if token[0] == "name":
                nametok = token
                token = self.token()
            if token is not None and token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseStruct(token)
            elif token is not None and token[0] == "op" and token[1] == "*":
                self.type = self.type + " " + nametok[1] + " *"
                token = self.token()
                while token is not None and token[0] == "op" and token[1] == "*":
                    self.type = self.type + " *"
                    token = self.token()
                if token[0] == "name":
                    nametok = token
                    token = self.token()
                else:
                    self.error("struct : expecting name", token)
                    return token
            elif token is not None and token[0] == "name" and nametok is not None:
                self.type = self.type + " " + nametok[1]
                return token

            if nametok is not None:
                self.lexer.push(token)
                token = nametok
            return token

        elif token[0] == "name" and token[1] == "union":
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]
            token = self.token()
            nametok = None
            if token[0] == "name":
                nametok = token
                token = self.token()
            if token is not None and token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseUnion(token)
            elif token is not None and token[0] == "name" and nametok is not None:
                self.type = self.type + " " + nametok[1]
                return token

            if nametok is not None:
                self.lexer.push(token)
                token = nametok
            return token

        elif token[0] == "name" and token[1] == "enum":
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]
            self.enums = []
            token = self.token()
            if token is not None and token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseEnumBlock(token)
            else:
                self.error("parsing enum: expecting '{'", token)
            enum_type = None
            if token is not None and token[0] != "name":
                self.lexer.push(token)
                token = ("name", "enum")
            else:
                enum_type = token[1]
            for enum in self.enums:
                self.index_add(enum[0], self.filename,
                               not self.is_header, "enum",
                               (enum[1], enum[2], enum_type))
            return token

        elif token[0] == "name" and token[1] == "VIR_ENUM_DECL":
            token = self.token()
            if token is not None and token[0] == "sep" and token[1] == "(":
                token = self.token()
                token = self.parseVirEnumDecl(token)
            else:
                self.error("parsing VIR_ENUM_DECL: expecting '('", token)
            if token is not None:
                self.lexer.push(token)
                token = ("name", "virenumdecl")
            return token

        elif token[0] == "name" and token[1] == "VIR_ENUM_IMPL":
            token = self.token()
            if token is not None and token[0] == "sep" and token[1] == "(":
                token = self.token()
                token = self.parseVirEnumImpl(token)
            else:
                self.error("parsing VIR_ENUM_IMPL: expecting '('", token)
            if token is not None:
                self.lexer.push(token)
                token = ("name", "virenumimpl")
            return token

        elif token[0] == "name" and token[1] == "VIR_LOG_INIT":
            token = self.token()
            if token is not None and token[0] == "sep" and token[1] == "(":
                token = self.token()
                token = self.parseVirLogInit(token)
            else:
                self.error("parsing VIR_LOG_INIT: expecting '('", token)
            if token is not None:
                self.lexer.push(token)
                token = ("name", "virloginit")
            return token

        elif token[0] == "name":
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]
        else:
            self.error("parsing type %s: expecting a name" % (self.type),
                       token)
            return token
        token = self.token()
        while token is not None and (token[0] == "op" or
                token[0] == "name" and token[1] == "const"):
            self.type = self.type + " " + token[1]
            token = self.token()

        #
        # if there is a parenthesis here, this means a function type
        #
        if token is not None and token[0] == "sep" and token[1] == '(':
            self.type = self.type + token[1]
            token = self.token()
            while token is not None and token[0] == "op" and token[1] == '*':
                self.type = self.type + token[1]
                token = self.token()
            if token is None or token[0] != "name":
                self.error("parsing function type, name expected", token)
                return token
            self.type = self.type + token[1]
            nametok = token
            token = self.token()
            if token is not None and token[0] == "sep" and token[1] == ')':
                self.type = self.type + token[1]
                token = self.token()
                if token is not None and token[0] == "sep" and token[1] == '(':
                    token = self.token()
                    type = self.type
                    token = self.parseSignature(token)
                    self.type = type
                else:
                    self.error("parsing function type, '(' expected", token)
                    return token
            else:
                self.error("parsing function type, ')' expected", token)
                return token
            self.lexer.push(token)
            token = nametok
            return token

        #
        # do some lookahead for arrays
        #
        if token is not None and token[0] == "name":
            nametok = token
            token = self.token()
            if token is not None and token[0] == "sep" and token[1] == '[':
                self.type = self.type + " " + nametok[1]
                while token is not None and token[0] == "sep" and token[1] == '[':
                    self.type = self.type + token[1]
                    token = self.token()
                    while token is not None and token[0] != 'sep' and \
                          token[1] != ']' and token[1] != ';':
                        self.type = self.type + token[1]
                        token = self.token()
                    if token is not None and token[0] == 'sep' and token[1] == ']':
                        self.type = self.type + token[1]
                        token = self.token()
                    else:
                        self.error("parsing array type, ']' expected", token)
                        return token
            elif token is not None and token[0] == "sep" and token[1] == ':':
                # remove :12 in case it's a limited int size
                token = self.token()
                token = self.token()
            self.lexer.push(token)
            token = nametok

        return token
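
    # Illustrative sketch (hypothetical input): for a declaration like
    #     unsigned long long bandwidth;
    # the read-ahead above accumulates self.type == "unsigned long long"
    # and returns the name token ("name", "bandwidth"), having pushed the
    # trailing ';' back onto the lexer for the caller.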

    #
    # Parse a signature: '(' has been parsed and we scan the type definition
    # up to the ')' included
    def parseSignature(self, token):
        signature = []
        if token is not None and token[0] == "sep" and token[1] == ')':
            self.signature = []
            token = self.token()
            return token
        while token is not None:
            token = self.parseType(token)
            if token is not None and token[0] == "name":
                signature.append((self.type, token[1], None))
                token = self.token()
            elif token is not None and token[0] == "sep" and token[1] == ',':
                token = self.token()
                continue
            elif token is not None and token[0] == "sep" and token[1] == ')':
                # only the type was provided
                if self.type == "...":
                    signature.append((self.type, "...", None))
                else:
                    signature.append((self.type, None, None))
            if token is not None and token[0] == "sep":
                if token[1] == ',':
                    token = self.token()
                    continue
                elif token[1] == ')':
                    token = self.token()
                    break
        self.signature = signature
        return token
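
    # Illustrative sketch (hypothetical input): scanning the parameter list
    #     (virDomainPtr domain, unsigned int flags)
    # leaves self.signature as
    #     [("virDomainPtr", "domain", None), ("unsigned int", "flags", None)]
    # with the third slot left for the argument documentation.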

    # this dict contains the functions that are allowed to use [unsigned]
    # long for legacy reasons in their signature and return type. this list is
    # fixed. new procedures and public APIs have to use [unsigned] long long
    long_legacy_functions = \
      { "virGetVersion"                  : (False, ("libVer", "typeVer")),
        "virConnectGetLibVersion"        : (False, ("libVer")),
        "virConnectGetVersion"           : (False, ("hvVer")),
        "virDomainGetMaxMemory"          : (True,  ()),
        "virDomainMigrate"               : (False, ("flags", "bandwidth")),
        "virDomainMigrate2"              : (False, ("flags", "bandwidth")),
        "virDomainMigrateBegin3"         : (False, ("flags", "bandwidth")),
        "virDomainMigrateConfirm3"       : (False, ("flags", "bandwidth")),
        "virDomainMigrateDirect"         : (False, ("flags", "bandwidth")),
        "virDomainMigrateFinish"         : (False, ("flags")),
        "virDomainMigrateFinish2"        : (False, ("flags")),
        "virDomainMigrateFinish3"        : (False, ("flags")),
        "virDomainMigratePeer2Peer"      : (False, ("flags", "bandwidth")),
        "virDomainMigratePerform"        : (False, ("flags", "bandwidth")),
        "virDomainMigratePerform3"       : (False, ("flags", "bandwidth")),
        "virDomainMigratePrepare"        : (False, ("flags", "bandwidth")),
        "virDomainMigratePrepare2"       : (False, ("flags", "bandwidth")),
        "virDomainMigratePrepare3"       : (False, ("flags", "bandwidth")),
        "virDomainMigratePrepareTunnel"  : (False, ("flags", "bandwidth")),
        "virDomainMigratePrepareTunnel3" : (False, ("flags", "bandwidth")),
        "virDomainMigrateToURI"          : (False, ("flags", "bandwidth")),
        "virDomainMigrateToURI2"         : (False, ("flags", "bandwidth")),
        "virDomainMigrateVersion1"       : (False, ("flags", "bandwidth")),
        "virDomainMigrateVersion2"       : (False, ("flags", "bandwidth")),
        "virDomainMigrateVersion3"       : (False, ("flags", "bandwidth")),
        "virDomainMigrateSetMaxSpeed"    : (False, ("bandwidth")),
        "virDomainSetMaxMemory"          : (False, ("memory")),
        "virDomainSetMemory"             : (False, ("memory")),
        "virDomainSetMemoryFlags"        : (False, ("memory")),
        "virDomainBlockCommit"           : (False, ("bandwidth")),
        "virDomainBlockJobSetSpeed"      : (False, ("bandwidth")),
        "virDomainBlockPull"             : (False, ("bandwidth")),
        "virDomainBlockRebase"           : (False, ("bandwidth")),
        "virDomainMigrateGetMaxSpeed"    : (False, ("bandwidth")) }

    def checkLongLegacyFunction(self, name, return_type, signature):
        if "long" in return_type and "long long" not in return_type:
            try:
                if not CParser.long_legacy_functions[name][0]:
                    raise Exception()
            except:
                self.error(("function '%s' is not allowed to return long, "
                            "use long long instead") % (name))

        for param in signature:
            if "long" in param[0] and "long long" not in param[0]:
                try:
                    if param[1] not in CParser.long_legacy_functions[name][1]:
                        raise Exception()
                except:
                    self.error(("function '%s' is not allowed to take long "
                                "parameter '%s', use long long instead")
                               % (name, param[1]))
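
    # Illustrative sketch: self.checkLongLegacyFunction("virDomainGetMaxMemory",
    # "unsigned long", []) passes because the whitelist above marks that
    # return type as legal, while an unlisted function using plain long
    # would be reported through self.error().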

    # this dict contains the structs that are allowed to use [unsigned]
    # long for legacy reasons. this list is fixed. new structs have to use
    # [unsigned] long long
    long_legacy_struct_fields = \
      { "_virDomainInfo"         : ("maxMem", "memory"),
        "_virNodeInfo"           : ("memory"),
        "_virDomainBlockJobInfo" : ("bandwidth") }

    def checkLongLegacyStruct(self, name, fields):
        for field in fields:
            if "long" in field[0] and "long long" not in field[0]:
                try:
                    if field[1] not in CParser.long_legacy_struct_fields[name]:
                        raise Exception()
                except:
                    self.error(("struct '%s' is not allowed to contain long "
                                "field '%s', use long long instead")
                               % (name, field[1]))

    #
    # Parse a global definition, be it a type, variable or function
    # the extern "C" blocks are a bit nasty and require it to recurse.
    #
    def parseGlobal(self, token):
        static = 0
        if token[1] == 'extern':
            token = self.token()
            if token is None:
                return token
            if token[0] == 'string':
                if token[1] == 'C':
                    token = self.token()
                    if token is None:
                        return token
                    if token[0] == 'sep' and token[1] == "{":
                        token = self.token()
                        # print 'Entering extern "C line ', self.lineno()
                        while token is not None and (token[0] != 'sep' or
                                token[1] != "}"):
                            if token[0] == 'name':
                                token = self.parseGlobal(token)
                            else:
                                self.error(
                                    "token %s %s unexpected at the top level" % (
                                        token[0], token[1]))
                                token = self.parseGlobal(token)
                        # print 'Exiting extern "C" line', self.lineno()
                        token = self.token()
                        return token
                else:
                    return token
        elif token[1] == 'static':
            static = 1
            token = self.token()
            if token is None or token[0] != 'name':
                return token

        if token[1] == 'typedef':
            token = self.token()
            return self.parseTypedef(token)
        else:
            token = self.parseType(token)
            type_orig = self.type
            if token is None or token[0] != "name":
                return token
            type = type_orig
            self.name = token[1]
            token = self.token()
            while token is not None and (token[0] == "sep" or token[0] == "op"):
                if token[0] == "sep":
                    if token[1] == "[":
                        type = type + token[1]
                        token = self.token()
                        while token is not None and (token[0] != "sep" or
                                token[1] != ";"):
                            type = type + token[1]
                            token = self.token()

                if token is not None and token[0] == "op" and token[1] == "=":
                    #
                    # Skip the initialization of the variable
                    #
                    token = self.token()
                    if token[0] == 'sep' and token[1] == '{':
                        token = self.token()
                        token = self.parseBlock(token)
                    else:
                        self.comment = None
                        while token is not None and (token[0] != "sep" or
                                (token[1] != ';' and token[1] != ',')):
                            token = self.token()
                    self.comment = None
                    if token is None or token[0] != "sep" or (token[1] != ';' and
                            token[1] != ','):
                        self.error("missing ';' or ',' after value")

                if token is not None and token[0] == "sep":
                    if token[1] == ";":
                        self.comment = None
                        token = self.token()
                        if type == "struct":
                            self.checkLongLegacyStruct(self.name, self.struct_fields)
                            self.index_add(self.name, self.filename,
                                           not self.is_header, "struct", self.struct_fields)
                        else:
                            self.index_add(self.name, self.filename,
                                           not self.is_header, "variable", type)
                        break
                    elif token[1] == "(":
                        token = self.token()
                        token = self.parseSignature(token)
                        if token is None:
                            return None
                        if token[0] == "sep" and token[1] == ";":
                            self.checkLongLegacyFunction(self.name, type, self.signature)
                            d = self.mergeFunctionComment(self.name,
                                    ((type, None), self.signature), 1)
                            self.index_add(self.name, self.filename, static,
                                           "function", d)
                            token = self.token()
                        elif token[0] == "sep" and token[1] == "{":
                            self.checkLongLegacyFunction(self.name, type, self.signature)
                            d = self.mergeFunctionComment(self.name,
                                    ((type, None), self.signature), static)
                            self.index_add(self.name, self.filename, static,
                                           "function", d)
                            token = self.token()
                            token = self.parseBlock(token)
                    elif token[1] == ',':
                        self.comment = None
                        self.index_add(self.name, self.filename, static,
                                       "variable", type)
                        type = type_orig
                        token = self.token()
                        while token is not None and token[0] == "sep":
                            type = type + token[1]
                            token = self.token()
                        if token is not None and token[0] == "name":
                            self.name = token[1]
                            token = self.token()
                    else:
                        break

        return token
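
    # Illustrative sketch (hypothetical input): a top-level declaration like
    #     static int virFooHelper(int x);
    # reaches the '(' branch above and is indexed as a "function" with
    # static == 1, while a declaration like "int virBar = 3;" has its
    # initializer skipped and is indexed as a "variable".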

    def parse(self):
        if not quiet:
            print "Parsing %s" % (self.filename)
        token = self.token()
        while token is not None:
            if token[0] == 'name':
                token = self.parseGlobal(token)
            else:
                self.error("token %s %s unexpected at the top level" % (
                    token[0], token[1]))
                token = self.parseGlobal(token)
                return
        self.parseTopComment(self.top_comment)
        return self.index


class docBuilder:
    """A documentation builder"""
    def __init__(self, name, path='.', directories=['.'], includes=[]):
        self.name = name
        self.path = path
        self.directories = directories
        if name == "libvirt":
            self.includes = includes + included_files.keys()
        elif name == "libvirt-qemu":
            self.includes = includes + qemu_included_files.keys()
        elif name == "libvirt-lxc":
            self.includes = includes + lxc_included_files.keys()
        self.modules = {}
        self.headers = {}
        self.idx = index()
        self.xref = {}
        self.index = {}
        self.basename = name
        self.errors = 0

    def warning(self, msg):
        global warnings
        warnings = warnings + 1
        print msg

    def error(self, msg):
        self.errors += 1
        print >>sys.stderr, "Error:", msg

    def indexString(self, id, str):
        if str is None:
            return
        # strip out punctuation so only bare words get indexed
        for c in ("'", '"', "/", '*', "[", "]", "(", ")",
                  "<", '>', "&", '#', ",", '.', ';'):
            str = string.replace(str, c, ' ')
        tokens = string.split(str)
        for token in tokens:
            try:
                c = token[0]
                if string.find(string.letters, c) < 0:
                    pass
                elif len(token) < 3:
                    pass
                else:
                    lower = string.lower(token)
                    # TODO: generalize this a bit
                    if lower == 'and' or lower == 'the':
                        pass
                    elif self.xref.has_key(token):
                        self.xref[token].append(id)
                    else:
                        self.xref[token] = [id]
            except:
                pass

    def analyze(self):
        if not quiet:
            print "Project %s : %d headers, %d modules" % (self.name, len(self.headers.keys()), len(self.modules.keys()))
        self.idx.analyze()

    def scanHeaders(self):
        for header in self.headers.keys():
            parser = CParser(header)
            idx = parser.parse()
            self.headers[header] = idx
            self.idx.merge(idx)

    def scanModules(self):
        for module in self.modules.keys():
            parser = CParser(module)
            idx = parser.parse()
            # idx.analyze()
            self.modules[module] = idx
            self.idx.merge_public(idx)

    def scan(self):
        for directory in self.directories:
            files = glob.glob(directory + "/*.c")
            for file in files:
                skip = 1
                for incl in self.includes:
                    if string.find(file, incl) != -1:
                        skip = 0
                        break
                if skip == 0:
                    self.modules[file] = None
            files = glob.glob(directory + "/*.h")
            for file in files:
                skip = 1
                for incl in self.includes:
                    if string.find(file, incl) != -1:
                        skip = 0
                        break
                if skip == 0:
                    self.headers[file] = None
        self.scanHeaders()
        self.scanModules()

    def modulename_file(self, file):
        module = os.path.basename(file)
        if module[-2:] == '.h':
            module = module[:-2]
        elif module[-2:] == '.c':
            module = module[:-2]
        return module
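
    # e.g. modulename_file("include/libvirt/libvirt.h") returns "libvirt"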

    def serialize_enum(self, output, name):
        id = self.idx.enums[name]
        output.write(" <enum name='%s' file='%s'" % (name,
                     self.modulename_file(id.header)))
        if id.info is not None:
            info = id.info
            if info[0] is not None and info[0] != '':
                try:
                    val = eval(info[0])
                except:
                    val = info[0]
                output.write(" value='%s'" % (val))
            if info[2] is not None and info[2] != '':
                output.write(" type='%s'" % info[2])
            if info[1] is not None and info[1] != '':
                output.write(" info='%s'" % escape(info[1]))
        output.write("/>\n")
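
    # Illustrative output sketch (hypothetical names): an enum whose index
    # info is ("1", "a note", "virFooType") is written as
    #     <enum name='VIR_FOO_BAR' file='libvirt' value='1' type='virFooType' info='a note'/>
    # with the attribute order following the checks above.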

    def serialize_macro(self, output, name):
        id = self.idx.macros[name]
        output.write(" <macro name='%s' file='%s'>\n" % (name,
                     self.modulename_file(id.header)))
        if id.info is not None:
            try:
                (args, desc) = id.info
                if desc is not None and desc != "":
                    output.write(" <info><![CDATA[%s]]></info>\n" % (desc))
                    self.indexString(name, desc)
                for arg in args:
                    (name, desc) = arg
                    if desc is not None and desc != "":
                        output.write(" <arg name='%s' info='%s'/>\n" % (
                            name, escape(desc)))
                        self.indexString(name, desc)
                    else:
                        output.write(" <arg name='%s'/>\n" % (name))
            except:
                pass
        output.write(" </macro>\n")

    def serialize_union(self, output, field, desc):
        output.write(" <field name='%s' type='union' info='%s'>\n" % (field[1], desc))
        output.write(" <union>\n")
        for f in field[3]:
            desc = f[2]
            if desc is None:
                desc = ''
            else:
                desc = escape(desc)
            output.write(" <field name='%s' type='%s' info='%s'/>\n" % (f[1], f[0], desc))

        output.write(" </union>\n")
        output.write(" </field>\n")

    def serialize_typedef(self, output, name):
        id = self.idx.typedefs[name]
        if id.info[0:7] == 'struct ':
            output.write(" <struct name='%s' file='%s' type='%s'" % (
                name, self.modulename_file(id.header), id.info))
            name = id.info[7:]
            if self.idx.structs.has_key(name) and (
                    type(self.idx.structs[name].info) == type(()) or
                    type(self.idx.structs[name].info) == type([])):
                output.write(">\n")
                try:
                    for field in self.idx.structs[name].info:
                        desc = field[2]
                        self.indexString(name, desc)
                        if desc is None:
                            desc = ''
                        else:
                            desc = escape(desc)
                        if field[0] == "union":
                            self.serialize_union(output, field, desc)
                        else:
                            output.write(" <field name='%s' type='%s' info='%s'/>\n" % (field[1], field[0], desc))
                except:
                    self.warning("Failed to serialize struct %s" % (name))
                output.write(" </struct>\n")
            else:
                output.write("/>\n")
        else:
            output.write(" <typedef name='%s' file='%s' type='%s'" % (
                name, self.modulename_file(id.header), id.info))
            try:
                desc = id.extra
                if desc is not None and desc != "":
                    output.write(">\n <info><![CDATA[%s]]></info>\n" % (desc))
                    output.write(" </typedef>\n")
                else:
                    output.write("/>\n")
            except:
                output.write("/>\n")
|
|
|
def serialize_variable(self, output, name):
|
|
|
|
id = self.idx.variables[name]
|
2013-08-22 09:16:03 +00:00
|
|
|
if id.info is not None:
|
2011-02-16 15:57:50 +00:00
|
|
|
output.write(" <variable name='%s' file='%s' type='%s'/>\n" % (
|
|
|
|
name, self.modulename_file(id.header), id.info))
|
|
|
|
else:
|
|
|
|
output.write(" <variable name='%s' file='%s'/>\n" % (
|
|
|
|
name, self.modulename_file(id.header)))
|
2008-02-05 19:27:37 +00:00
|
|
|
|

    def serialize_function(self, output, name):
        id = self.idx.functions[name]
        if name == debugsym and not quiet:
            print "=>", id

        output.write(" <%s name='%s' file='%s' module='%s'>\n" % (id.type,
                     name, self.modulename_file(id.header),
                     self.modulename_file(id.module)))
        #
        # Processing of conditionals modified by Bill 1/1/05
        #
        if id.conditionals is not None:
            apstr = ""
            for cond in id.conditionals:
                if apstr != "":
                    apstr = apstr + " && "
                apstr = apstr + cond
            output.write(" <cond>%s</cond>\n" % (apstr))
        try:
            (ret, params, desc) = id.info
            output.write(" <info><![CDATA[%s]]></info>\n" % (desc))
            self.indexString(name, desc)
            if ret[0] is not None:
                if ret[0] == "void":
                    output.write(" <return type='void'/>\n")
                elif (ret[1] is None or ret[1] == '') and not ignored_functions.has_key(name):
                    self.error("Missing documentation for return of function `%s'" % name)
                else:
                    output.write(" <return type='%s' info='%s'/>\n" % (
                        ret[0], escape(ret[1])))
                    self.indexString(name, ret[1])
            for param in params:
                if param[0] == 'void':
                    continue
                if (param[2] is None or param[2] == ''):
                    if ignored_functions.has_key(name):
                        output.write(" <arg name='%s' type='%s' info=''/>\n" % (param[1], param[0]))
                    else:
                        self.error("Missing documentation for arg `%s' of function `%s'" % (param[1], name))
                else:
                    output.write(" <arg name='%s' type='%s' info='%s'/>\n" % (param[1], param[0], escape(param[2])))
                    self.indexString(name, param[2])
        except:
            print >>sys.stderr, "Exception:", sys.exc_info()[1]
            self.warning("Failed to save function %s info: %s" % (name, `id.info`))
        output.write(" </%s>\n" % (id.type))

    def serialize_exports(self, output, file):
        module = self.modulename_file(file)
        output.write(" <file name='%s'>\n" % (module))
        dict = self.headers[file]
        if dict.info is not None:
            for data in ('Summary', 'Description', 'Author'):
                try:
                    output.write(" <%s>%s</%s>\n" % (
                        string.lower(data),
                        escape(dict.info[data]),
                        string.lower(data)))
                except:
                    self.warning("Header %s lacks a %s description" % (module, data))
            if dict.info.has_key('Description'):
                desc = dict.info['Description']
                if string.find(desc, "DEPRECATED") != -1:
                    output.write(" <deprecated/>\n")

        ids = dict.macros.keys()
        ids.sort()
        for id in uniq(ids):
            # Macros are sometimes used to masquerade other types.
            if dict.functions.has_key(id):
                continue
            if dict.variables.has_key(id):
                continue
            if dict.typedefs.has_key(id):
                continue
            if dict.structs.has_key(id):
                continue
            if dict.unions.has_key(id):
                continue
            if dict.enums.has_key(id):
                continue
            output.write(" <exports symbol='%s' type='macro'/>\n" % (id))
        ids = dict.enums.keys()
        ids.sort()
        for id in uniq(ids):
            output.write(" <exports symbol='%s' type='enum'/>\n" % (id))
        ids = dict.typedefs.keys()
        ids.sort()
        for id in uniq(ids):
            output.write(" <exports symbol='%s' type='typedef'/>\n" % (id))
        ids = dict.structs.keys()
        ids.sort()
        for id in uniq(ids):
            output.write(" <exports symbol='%s' type='struct'/>\n" % (id))
        ids = dict.variables.keys()
        ids.sort()
        for id in uniq(ids):
            output.write(" <exports symbol='%s' type='variable'/>\n" % (id))
        ids = dict.functions.keys()
        ids.sort()
        for id in uniq(ids):
            output.write(" <exports symbol='%s' type='function'/>\n" % (id))
        output.write(" </file>\n")

    def serialize_xrefs_files(self, output):
        headers = self.headers.keys()
        headers.sort()
        for file in headers:
            module = self.modulename_file(file)
            output.write(" <file name='%s'>\n" % (module))
            dict = self.headers[file]
            ids = uniq(dict.functions.keys() + dict.variables.keys() +
                       dict.macros.keys() + dict.typedefs.keys() +
                       dict.structs.keys() + dict.enums.keys())
            ids.sort()
            for id in ids:
                output.write(" <ref name='%s'/>\n" % (id))
            output.write(" </file>\n")
        pass

    def serialize_xrefs_functions(self, output):
        funcs = {}
        for name in self.idx.functions.keys():
            id = self.idx.functions[name]
            try:
                (ret, params, desc) = id.info
                for param in params:
                    if param[0] == 'void':
                        continue
                    if funcs.has_key(param[0]):
                        funcs[param[0]].append(name)
                    else:
                        funcs[param[0]] = [name]
            except:
                pass
        typ = funcs.keys()
        typ.sort()
        for type in typ:
            if type == '' or type == 'void' or type == "int" or \
               type == "char *" or type == "const char *":
                continue
            output.write(" <type name='%s'>\n" % (type))
            ids = funcs[type]
            ids.sort()
            pid = ''  # not sure why we have dups, but get rid of them!
            for id in ids:
                if id != pid:
                    output.write(" <ref name='%s'/>\n" % (id))
                    pid = id
            output.write(" </type>\n")

    def serialize_xrefs_constructors(self, output):
        funcs = {}
        for name in self.idx.functions.keys():
            id = self.idx.functions[name]
            try:
                (ret, params, desc) = id.info
                if ret[0] == "void":
                    continue
                if funcs.has_key(ret[0]):
                    funcs[ret[0]].append(name)
                else:
                    funcs[ret[0]] = [name]
            except:
                pass
        typ = funcs.keys()
        typ.sort()
        for type in typ:
            if type == '' or type == 'void' or type == "int" or \
               type == "char *" or type == "const char *":
                continue
            output.write(" <type name='%s'>\n" % (type))
            ids = funcs[type]
            ids.sort()
            for id in ids:
                output.write(" <ref name='%s'/>\n" % (id))
            output.write(" </type>\n")

    def serialize_xrefs_alpha(self, output):
        letter = None
        ids = self.idx.identifiers.keys()
        ids.sort()
        for id in ids:
            if id[0] != letter:
                if letter is not None:
                    output.write(" </letter>\n")
                letter = id[0]
                output.write(" <letter name='%s'>\n" % (letter))
            output.write(" <ref name='%s'/>\n" % (id))
        if letter is not None:
            output.write(" </letter>\n")

    def serialize_xrefs_references(self, output):
        typ = self.idx.identifiers.keys()
        typ.sort()
        for id in typ:
            idf = self.idx.identifiers[id]
            module = idf.header
            output.write(" <reference name='%s' href='%s'/>\n" % (id,
                         'html/' + self.basename + '-' +
                         self.modulename_file(module) + '.html#' + id))

    def serialize_xrefs_index(self, output):
        index = self.xref
        typ = index.keys()
        typ.sort()
        letter = None
        count = 0
        chunk = 0
        chunks = []
        for id in typ:
            if len(index[id]) > 30:
                continue
            if id[0] != letter:
                if letter is None or count > 200:
                    if letter is not None:
                        output.write(" </letter>\n")
                        output.write(" </chunk>\n")
                        count = 0
                        chunks.append(["chunk%s" % (chunk - 1), first_letter, letter])
                    output.write(" <chunk name='chunk%s'>\n" % (chunk))
                    first_letter = id[0]
                    chunk = chunk + 1
                elif letter is not None:
                    output.write(" </letter>\n")
                letter = id[0]
                output.write(" <letter name='%s'>\n" % (letter))
            output.write(" <word name='%s'>\n" % (id))
            tokens = index[id]
            tokens.sort()
            tok = None
            for token in tokens:
                if tok == token:
                    continue
                tok = token
                output.write(" <ref name='%s'/>\n" % (token))
                count = count + 1
            output.write(" </word>\n")
        if letter is not None:
            output.write(" </letter>\n")
            output.write(" </chunk>\n")
            if count != 0:
                chunks.append(["chunk%s" % (chunk - 1), first_letter, letter])
            output.write(" <chunks>\n")
            for ch in chunks:
                output.write(" <chunk name='%s' start='%s' end='%s'/>\n" % (
                    ch[0], ch[1], ch[2]))
            output.write(" </chunks>\n")

    def serialize_xrefs(self, output):
        output.write(" <references>\n")
        self.serialize_xrefs_references(output)
        output.write(" </references>\n")
        output.write(" <alpha>\n")
        self.serialize_xrefs_alpha(output)
        output.write(" </alpha>\n")
        output.write(" <constructors>\n")
        self.serialize_xrefs_constructors(output)
        output.write(" </constructors>\n")
        output.write(" <functions>\n")
        self.serialize_xrefs_functions(output)
        output.write(" </functions>\n")
        output.write(" <files>\n")
        self.serialize_xrefs_files(output)
        output.write(" </files>\n")
        output.write(" <index>\n")
        self.serialize_xrefs_index(output)
        output.write(" </index>\n")
    def serialize(self):
        filename = "%s/%s-api.xml" % (self.path, self.name)
        if not quiet:
            print "Saving XML description %s" % (filename)
        output = open(filename, "w")
        output.write('<?xml version="1.0" encoding="ISO-8859-1"?>\n')
        output.write("<api name='%s'>\n" % self.name)
        output.write("  <files>\n")
        headers = self.headers.keys()
        headers.sort()
        for file in headers:
            self.serialize_exports(output, file)
        output.write("  </files>\n")
        output.write("  <symbols>\n")
        macros = self.idx.macros.keys()
        macros.sort()
        for macro in macros:
            self.serialize_macro(output, macro)
        enums = self.idx.enums.keys()
        enums.sort()
        for enum in enums:
            self.serialize_enum(output, enum)
        typedefs = self.idx.typedefs.keys()
        typedefs.sort()
        for typedef in typedefs:
            self.serialize_typedef(output, typedef)
        variables = self.idx.variables.keys()
        variables.sort()
        for variable in variables:
            self.serialize_variable(output, variable)
        functions = self.idx.functions.keys()
        functions.sort()
        for function in functions:
            self.serialize_function(output, function)
        output.write("  </symbols>\n")
        output.write("</api>\n")
        output.close()
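
        # The -api.xml document just written has this overall shape (a
        # sketch; the per-symbol content comes from the serialize_* helpers
        # called above):
        #
        #   <?xml version="1.0" encoding="ISO-8859-1"?>
        #   <api name='libvirt'>
        #     <files>...</files>
        #     <symbols>...</symbols>
        #   </api>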
        if self.errors > 0:
            print >>sys.stderr, "apibuild.py: %d error(s) encountered during generation" % self.errors
            sys.exit(3)
        filename = "%s/%s-refs.xml" % (self.path, self.name)
        if not quiet:
            print "Saving XML Cross References %s" % (filename)
        output = open(filename, "w")
        output.write('<?xml version="1.0" encoding="ISO-8859-1"?>\n')
        output.write("<apirefs name='%s'>\n" % self.name)
        self.serialize_xrefs(output)
        output.write("</apirefs>\n")
        output.close()
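
# A minimal driver for docBuilder above (a sketch only; the module name and
# directory list are illustrative, rebuild() below shows the real lookup
# logic used in the libvirt tree):
#
#   builder = docBuilder("libvirt", srcdir,
#                        ["src", "src/util", "include/libvirt"], [])
#   builder.scan()        # parse the headers and sources
#   builder.analyze()     # post-process the parsed symbol index
#   builder.serialize()   # write libvirt-api.xml and libvirt-refs.xml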
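# rebuild() regenerates the API description for one module, locating the
# sources either relative to $srcdir (taken from the environment, for
# out-of-tree builds) or relative to the current directory when run in-tree.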
def rebuild(name):
    if name not in ["libvirt", "libvirt-qemu", "libvirt-lxc"]:
        # report directly: there is no docBuilder instance at this point
        print >>sys.stderr, "rebuild() failed, unknown module %s" % name
        return None
    builder = None
    srcdir = os.environ["srcdir"]
    if glob.glob(srcdir + "/../src/libvirt.c") != []:
        if not quiet:
            print "Rebuilding API description for %s" % name
        dirs = [srcdir + "/../src",
                srcdir + "/../src/util",
                srcdir + "/../include/libvirt"]
        if glob.glob(srcdir + "/../include/libvirt/libvirt.h") == []:
            dirs.append("../include/libvirt")
        builder = docBuilder(name, srcdir, dirs, [])
    elif glob.glob("src/libvirt.c") != []:
        if not quiet:
            print "Rebuilding API description for %s" % name
        builder = docBuilder(name, srcdir,
                             ["src", "src/util", "include/libvirt"],
                             [])
    else:
        print >>sys.stderr, "rebuild() failed, unable to guess the module"
        return None
    builder.scan()
    builder.analyze()
    builder.serialize()
    return builder
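
# rebuild() is invoked from the __main__ block below, once per module:
#
#   rebuild("libvirt")        # writes libvirt-api.xml / libvirt-refs.xml
#   rebuild("libvirt-qemu")   # writes libvirt-qemu-api.xml / libvirt-qemu-refs.xml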
#
# for debugging the parser
#
def parse(filename):
    parser = CParser(filename)
    idx = parser.parse()
    return idx
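
# Example (the path is illustrative; any C file the CParser understands
# will do):
#
#   idx = parse("../src/libvirt.c")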
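# Exit status: 0 on success, 2 if documentation warnings were emitted,
# 3 (raised in serialize() above) if hard errors were encountered.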
if __name__ == "__main__":
    if len(sys.argv) > 1:
        debug = True
        parse(sys.argv[1])
    else:
        rebuild("libvirt")
        rebuild("libvirt-qemu")
        rebuild("libvirt-lxc")
    if warnings > 0:
        sys.exit(2)
    else:
        sys.exit(0)