path: root/gr-utils/python
author     japm48 <japm48@users.noreply.github.com>  2020-03-26 16:01:00 +0100
committer  Michael Dickens <michael.dickens@ettus.com>  2020-04-10 14:57:01 -0400
commit     4c4a85f2ebc43c9b7b3d5fdb8ad8387eae735f5f (patch)
tree       10b5aa2671aafb9e393af7538177d2e48f60780d /gr-utils/python
parent     78955a24c6c24f92f7b22ab9223e8e0b60b1dc4b (diff)
gr-utils: restructure {mod,block}tool folders
Move modtool and blocktool outside of the python folder, as per issue #2462.
Diffstat (limited to 'gr-utils/python')
-rw-r--r--  gr-utils/python/blocktool/CMakeLists.txt | 20
-rw-r--r--  gr-utils/python/blocktool/README.blocktool | 28
-rw-r--r--  gr-utils/python/blocktool/__init__.py | 11
-rw-r--r--  gr-utils/python/blocktool/__main__.py | 18
-rw-r--r--  gr-utils/python/blocktool/cli.py | 130
-rw-r--r--  gr-utils/python/blocktool/core/CMakeLists.txt | 19
-rw-r--r--  gr-utils/python/blocktool/core/Constants.py | 55
-rw-r--r--  gr-utils/python/blocktool/core/__init__.py | 0
-rw-r--r--  gr-utils/python/blocktool/core/base.py | 41
-rw-r--r--  gr-utils/python/blocktool/core/comments.py | 258
-rw-r--r--  gr-utils/python/blocktool/core/iosignature.py | 182
-rw-r--r--  gr-utils/python/blocktool/core/outputschema.py | 157
-rw-r--r--  gr-utils/python/blocktool/core/parseheader.py | 271
-rw-r--r--  gr-utils/python/blocktool/tests/README.blocktool_test | 12
-rw-r--r--  gr-utils/python/blocktool/tests/sample_json/analog_agc2_cc.json | 131
-rw-r--r--  gr-utils/python/blocktool/tests/sample_json/digital_additive_scrambler_bb.json | 95
-rw-r--r--  gr-utils/python/blocktool/tests/sample_yaml/analog_agc2_cc.yml | 67
-rw-r--r--  gr-utils/python/blocktool/tests/sample_yaml/digital_additive_scrambler_bb.yml | 63
-rw-r--r--  gr-utils/python/blocktool/tests/test_blocktool.py | 187
-rw-r--r--  gr-utils/python/blocktool/tests/test_json_file.py | 41
-rw-r--r--  gr-utils/python/modtool/CMakeLists.txt | 38
-rw-r--r--  gr-utils/python/modtool/README.modtool | 29
-rw-r--r--  gr-utils/python/modtool/__init__.py | 0
-rw-r--r--  gr-utils/python/modtool/cli/CMakeLists.txt | 22
-rw-r--r--  gr-utils/python/modtool/cli/__init__.py | 15
-rw-r--r--  gr-utils/python/modtool/cli/add.py | 140
-rw-r--r--  gr-utils/python/modtool/cli/base.py | 158
-rw-r--r--  gr-utils/python/modtool/cli/disable.py | 40
-rw-r--r--  gr-utils/python/modtool/cli/info.py | 30
-rw-r--r--  gr-utils/python/modtool/cli/makeyaml.py | 72
-rw-r--r--  gr-utils/python/modtool/cli/newmod.py | 58
-rw-r--r--  gr-utils/python/modtool/cli/rename.py | 71
-rw-r--r--  gr-utils/python/modtool/cli/rm.py | 40
-rw-r--r--  gr-utils/python/modtool/cli/update.py | 50
-rw-r--r--  gr-utils/python/modtool/core/CMakeLists.txt | 22
-rw-r--r--  gr-utils/python/modtool/core/__init__.py | 22
-rw-r--r--  gr-utils/python/modtool/core/add.py | 307
-rw-r--r--  gr-utils/python/modtool/core/base.py | 202
-rw-r--r--  gr-utils/python/modtool/core/disable.py | 161
-rw-r--r--  gr-utils/python/modtool/core/info.py | 132
-rw-r--r--  gr-utils/python/modtool/core/makeyaml.py | 353
-rw-r--r--  gr-utils/python/modtool/core/newmod.py | 89
-rw-r--r--  gr-utils/python/modtool/core/rename.py | 175
-rw-r--r--  gr-utils/python/modtool/core/rm.py | 167
-rw-r--r--  gr-utils/python/modtool/core/update.py | 101
-rw-r--r--  gr-utils/python/modtool/modtool.conf.in | 6
-rw-r--r--  gr-utils/python/modtool/templates/CMakeLists.txt | 19
-rw-r--r--  gr-utils/python/modtool/templates/__init__.py | 14
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/CMakeLists.txt | 156
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/MANIFEST.md | 17
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/apps/CMakeLists.txt | 14
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/cmake/Modules/CMakeParseArgumentsCopy.cmake | 138
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/cmake/Modules/howtoConfig.cmake | 31
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/cmake/Modules/targetConfig.cmake.in | 14
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/cmake/cmake_uninstall.cmake.in | 32
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/docs/CMakeLists.txt | 24
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/docs/README.howto | 11
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/CMakeLists.txt | 41
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/Doxyfile.in | 1896
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/Doxyfile.swig_doc.in | 1864
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/__init__.py | 72
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/base.py | 210
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/doxyindex.py | 292
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/generated/__init__.py | 8
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/generated/compound.py | 505
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/generated/compoundsuper.py | 8346
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/generated/index.py | 79
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/generated/indexsuper.py | 526
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/text.py | 46
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/other/group_defs.dox | 7
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/other/main_page.dox | 10
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/swig_doc.py | 320
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/examples/README | 4
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/grc/CMakeLists.txt | 11
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/include/howto/CMakeLists.txt | 15
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/include/howto/api.h | 22
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/lib/CMakeLists.txt | 71
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/python/CMakeLists.txt | 32
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/python/__init__.py | 23
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/swig/CMakeLists.txt | 54
-rw-r--r--  gr-utils/python/modtool/templates/gr-newmod/swig/howto_swig.i | 12
-rw-r--r--  gr-utils/python/modtool/templates/templates.py | 754
-rw-r--r--  gr-utils/python/modtool/tests/CMakeLists.txt | 31
-rw-r--r--  gr-utils/python/modtool/tests/__init__.py | 0
-rw-r--r--  gr-utils/python/modtool/tests/test_modtool.py | 323
-rw-r--r--  gr-utils/python/modtool/tools/CMakeLists.txt | 19
-rw-r--r--  gr-utils/python/modtool/tools/__init__.py | 19
-rw-r--r--  gr-utils/python/modtool/tools/cmakefile_editor.py | 140
-rw-r--r--  gr-utils/python/modtool/tools/code_generator.py | 52
-rw-r--r--  gr-utils/python/modtool/tools/grc_yaml_generator.py | 129
-rw-r--r--  gr-utils/python/modtool/tools/parser_cc_block.py | 223
-rw-r--r--  gr-utils/python/modtool/tools/scm.py | 223
-rw-r--r--  gr-utils/python/modtool/tools/util_functions.py | 167
93 files changed, 0 insertions(+), 21302 deletions(-)
diff --git a/gr-utils/python/blocktool/CMakeLists.txt b/gr-utils/python/blocktool/CMakeLists.txt
deleted file mode 100644
index 235f3e5666..0000000000
--- a/gr-utils/python/blocktool/CMakeLists.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright 2019 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-
-include(GrPython)
-
-GR_PYTHON_INSTALL(FILES
- __init__.py
- __main__.py
- cli.py
- DESTINATION ${GR_PYTHON_DIR}/gnuradio/blocktool
-)
-
-########################################################################
-# Add subdirectories
-########################################################################
-add_subdirectory(core)
diff --git a/gr-utils/python/blocktool/README.blocktool b/gr-utils/python/blocktool/README.blocktool
deleted file mode 100644
index 0183b0093c..0000000000
--- a/gr-utils/python/blocktool/README.blocktool
+++ /dev/null
@@ -1,28 +0,0 @@
-gr_blocktool: Also known as the block header parsing tool, this tool
- automatically parses any GNU Radio or OOT block header.
-
-Block header tool from the Command Line Interface
-=================================================
-
-* Parse any GNU Radio or OOT header file with just the file path as input.
-* Parse a complete header directory with the directory path as input.
-* Get the output as a YAML or a JSON file.
-* Add blocktool comments to the header file automatically, derived from the implementation file.
-
-
-Integration of blocktool with modtool
-=====================================
-
-* The blocktool API can also be called from modtool.
-* The modtool makeyaml subcommand, with the -b flag and a
-  file path as input, can be used to create YAML files for GRC.
-* The YAML output is more detailed when generated through the blocktool API.
-
-
-Use of blocktool as an independent API
-======================================
-
-Blocktool can also be used as an independent API to parse a block header
-file at runtime, as sketched below.
-* A single mandatory argument, the block header file path, is required.
-
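The independent-API use described above is only prose in this README; a minimal sketch of that flow, based on the BlockHeaderParser class and the unit tests removed in this commit, follows. The header path is hypothetical, and the import may be gnuradio.blocktool rather than blocktool depending on how the package is installed.

    # Sketch only: parse one public header and inspect the result.
    # Requires pygccxml and castxml, as the removed unit tests do.
    from blocktool import BlockHeaderParser  # assumption: importable as `blocktool`

    header = 'gr-analog/include/gnuradio/analog/agc2_cc.h'  # hypothetical path
    parser = BlockHeaderParser(file_path=header)
    parser.run_blocktool()                      # populates parser.parsed_data
    print(parser.parsed_data['class'])          # e.g. 'agc2_cc'
    print(parser.parsed_data['make']['arguments'])
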
diff --git a/gr-utils/python/blocktool/__init__.py b/gr-utils/python/blocktool/__init__.py
deleted file mode 100644
index a8206b76df..0000000000
--- a/gr-utils/python/blocktool/__init__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# Copyright 2019 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-
-from .core.parseheader import BlockHeaderParser
-from .core.outputschema import RESULT_SCHEMA
diff --git a/gr-utils/python/blocktool/__main__.py b/gr-utils/python/blocktool/__main__.py
deleted file mode 100644
index 97c2f77f49..0000000000
--- a/gr-utils/python/blocktool/__main__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Copyright 2019 Free Software Foundation, Inc.
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-2.0-or-later
-#
-#
-""" main function to run the blocktool api from the command line. """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import sys
-from .cli import cli
-
-
-sys.exit(cli())
diff --git a/gr-utils/python/blocktool/cli.py b/gr-utils/python/blocktool/cli.py
deleted file mode 100644
index b5bf6390f1..0000000000
--- a/gr-utils/python/blocktool/cli.py
+++ /dev/null
@@ -1,130 +0,0 @@
-#
-# Copyright 2019 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Module to generate parsed header output data """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import os
-import sys
-import json
-import logging
-
-import click
-from click import ClickException
-
-from gnuradio.modtool.core import yaml_generator
-
-from .core.parseheader import BlockHeaderParser
-
-LOGGER = logging.getLogger(__name__)
-
-
-class BlockToolException(ClickException):
- """ Exception class for enhanced CLI interface """
-
- def show(self, file=None):
- """ displays the colored message """
- click.secho('BlockToolException: {}'.format(
- self.format_message()), fg='red')
-
-
-def run_blocktool(module):
- """Call the run function of the core modules."""
- try:
- module.run_blocktool()
- except BlockToolException as err:
- click.echo(err, file=sys.stderr)
- exit(1)
-
-
-@click.command('parseheader',
- short_help='Generate the parsed output for the header file or directory in a specified format')
-@click.argument('file-path', nargs=1)
-@click.option('--yaml', is_flag=True,
- help='If given, a YAML response will be printed, else default json will be printed')
-@click.option('-c', '--blocktool-comments', is_flag=True,
- help='blocktool helper comments will be added in the header file')
-@click.option('-o', '--output', is_flag=True,
- help='If given, a file with desired output format will be generated')
-@click.option('-I', '--include_paths', default=None,
- help='Comma separated list of include paths for header files')
-def cli(**kwargs):
- """
- Block header parsing tool.
- \b
- A tool that can be used to automatically parse the headers in GNU Radio project
- or the OOT modules
- """
- kwargs['modtool'] = False
- if os.path.isfile(kwargs['file_path']):
- parser = BlockHeaderParser(**kwargs)
- run_blocktool(parser)
- if kwargs['yaml']:
- parser.yaml = True
- yaml_generator(parser, **kwargs)
- else:
- parser.json_confirm = True
- json_generator(parser, **kwargs)
- elif os.path.isdir(kwargs['file_path']):
- parse_directory(**kwargs)
- else:
- raise BlockToolException('Invalid file or directory path.')
-
-
-def json_generator(parser, **kwargs):
- """
- Generate JSON file for the block header
- """
- header = parser.filename.split('.')[0]
- block = parser.modname.split('-')[-1]
- if kwargs['output']:
- json_file = os.path.join('.', block+'_'+header + '.json')
- with open(json_file, 'w') as _file:
- json.dump(parser.parsed_data, _file, indent=4)
- else:
- print(json.dumps(parser.parsed_data, indent=4))
-
-
-def parse_directory(**kwargs):
- """
- Get parsed json and yaml output for complete header directory
- """
- kwargs['output'] = True
- dir_path = kwargs['file_path']
- dir_path = os.path.abspath(dir_path)
- list_header = []
- dir_name = os.path.basename(dir_path)
- for _header in os.listdir(dir_path):
- if _header.endswith('.h') and os.path.isfile(os.path.join(dir_path, _header)):
- list_header.append(os.path.join(dir_path, _header))
- list_header = sorted(list_header)
- if list_header:
- for header_path in list_header:
- kwargs['file_path'] = header_path
- header = os.path.basename(header_path)
- try:
- parse_dir = BlockHeaderParser(**kwargs)
- parse_dir.yaml = True
- parse_dir.json = True
- run_blocktool(parse_dir)
- yaml_generator(parse_dir, **kwargs)
- if not kwargs['modtool']:
- json_generator(parse_dir, **kwargs)
- except:
- logging.basicConfig(level=logging.DEBUG,
- filename=os.path.join('.', dir_name+'_log.out'))
- logging.exception(
- 'Log for Exception raised for the header: {}\n'.format(header))
- click.secho('Parsing unsuccessful: {}'.format(
- header), fg='yellow')
- else:
- raise BlockToolException(
- 'Invalid directory! No header found to be parsed')
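The cli() command above can also be exercised without a shell through click's test runner; a hedged sketch follows (the import path is an assumption and the header path is hypothetical; the --yaml behavior is as described by the option help text).

    # Sketch: drive the `parseheader` command via click.testing.CliRunner.
    from click.testing import CliRunner
    from blocktool.cli import cli  # assumption: importable as `blocktool`

    runner = CliRunner()
    # Print a YAML response for a single header:
    result = runner.invoke(cli, ['gr-analog/include/gnuradio/analog/agc2_cc.h', '--yaml'])
    print(result.output)
    # Write JSON to ./analog_agc2_cc.json instead of printing it:
    result = runner.invoke(cli, ['gr-analog/include/gnuradio/analog/agc2_cc.h', '-o'])
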
diff --git a/gr-utils/python/blocktool/core/CMakeLists.txt b/gr-utils/python/blocktool/core/CMakeLists.txt
deleted file mode 100644
index 697e477caf..0000000000
--- a/gr-utils/python/blocktool/core/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright 2019 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-
-include(GrPython)
-
-GR_PYTHON_INSTALL(FILES
- __init__.py
- base.py
- comments.py
- parseheader.py
- iosignature.py
- outputschema.py
- Constants.py
- DESTINATION ${GR_PYTHON_DIR}/gnuradio/blocktool/core
-)
diff --git a/gr-utils/python/blocktool/core/Constants.py b/gr-utils/python/blocktool/core/Constants.py
deleted file mode 100644
index be79631b97..0000000000
--- a/gr-utils/python/blocktool/core/Constants.py
+++ /dev/null
@@ -1,55 +0,0 @@
-#
-# Copyright 2019 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" constants file """
-
-# Kernel Namespace
-KERNEL = 'kernel'
-
-# I/O Signature (Symbols and constants)
-IO_SIGNATURE = 'io_signature::'
-SIGNATURE_LIST = ['makev', 'make3', 'make2', 'make']
-MAKE = 'make'
-MAKE2 = 'make2'
-MAKE3 = 'make3'
-MAKEV = 'makev'
-
-
-# message ports id
-MESSAGE_INPUT = 'message_port_register_in'
-MESSAGE_OUTPUT = 'message_port_register_out'
-
-# Symbols and constants required for parsing
-GR = 'gr-'
-UTILS = 'utils'
-OPEN_BRACKET = '('
-CLOSE_BRACKET = ')'
-STRIP_SYMBOLS = ' ,:)'
-EXCLAMATION = '!'
-
-# Blocktool special comments
-BLOCKTOOL = '! BlockTool'
-END_BLOCKTOOL = 'EndTool !'
-INPUT_SIG = 'input_signature'
-OUTPUT_SIG = 'output_signature'
-INPUT_MIN = 'input_min_streams'
-INPUT_MAX = 'input_max_streams'
-OUTPUT_MIN = 'output_min_streams'
-OUTPUT_MAX = 'output_max_streams'
-INPUT_MAKE_SIZE = 'input_sizeof_stream_item'
-INPUT_MAKEV_SIZE = 'input_sizeof_stream_items'
-INPUT_MAKE_SIZE1 = 'input_sizeof_stream_item1'
-INPUT_MAKE_SIZE2 = 'input_sizeof_stream_item2'
-INPUT_MAKE_SIZE3 = 'input_sizeof_stream_item3'
-OUTPUT_MAKE_SIZE = 'output_sizeof_stream_item'
-OUTPUT_MAKEV_SIZE = 'output_sizeof_stream_items'
-OUTPUT_MAKE_SIZE1 = 'output_sizeof_stream_item1'
-OUTPUT_MAKE_SIZE2 = 'output_sizeof_stream_item2'
-OUTPUT_MAKE_SIZE3 = 'output_sizeof_stream_item3'
-INPUT_PORT = 'message_input'
-OUTPUT_PORT = 'message_output'
diff --git a/gr-utils/python/blocktool/core/__init__.py b/gr-utils/python/blocktool/core/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/gr-utils/python/blocktool/core/__init__.py
+++ /dev/null
diff --git a/gr-utils/python/blocktool/core/base.py b/gr-utils/python/blocktool/core/base.py
deleted file mode 100644
index 30c6577398..0000000000
--- a/gr-utils/python/blocktool/core/base.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#
-# Copyright 2019 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Base class for the modules """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-from abc import ABC, abstractmethod
-
-
-class BlockToolException(Exception):
- """ Standard exception for blocktool classes. """
- pass
-
-
-class BlockTool(ABC):
- """ Base class for all blocktool command classes. """
- name = 'base'
- description = None
-
- def __init__(self, modname=None, filename=None, targetdir=None,
- target_file=None, module=None, impldir=None, impl_file=None,
- yaml=False, json=False, include_paths=None, **kwargs):
- """ __init__ """
- pass
-
- def _validate(self):
- """ Validates the arguments """
- pass
-
- @abstractmethod
- def run_blocktool(self):
- """ Override this. """
- pass
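BlockTool is an abstract base class whose only required override is run_blocktool(); a minimal, purely illustrative subclass is sketched below (BlockHeaderParser further down is the real implementation).

    # Sketch: the smallest conforming BlockTool subclass.
    from blocktool.core.base import BlockTool  # assumption: importable as `blocktool`

    class EchoTool(BlockTool):
        """Illustrative only; just satisfies the abstract interface."""
        name = 'echo'
        description = 'Echoes the keyword arguments it was built with.'

        def __init__(self, **kwargs):
            BlockTool.__init__(self, **kwargs)
            self._kwargs = kwargs

        def run_blocktool(self):
            print(self._kwargs)
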
diff --git a/gr-utils/python/blocktool/core/comments.py b/gr-utils/python/blocktool/core/comments.py
deleted file mode 100644
index d7919609bb..0000000000
--- a/gr-utils/python/blocktool/core/comments.py
+++ /dev/null
@@ -1,258 +0,0 @@
-#
-# Copyright 2019 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Module to read and add special blocktool comments in the public header """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import warnings
-
-from ..core import Constants
-
-
-def strip_symbols(line):
- """
- helper function to strip symbols
- from blocktool comment syntax
- """
- return line.split(':')[-1].lstrip().rstrip()
-
-
-def exist_comments(self):
- """
- function to check if blocktool special comments
- already exist in the public header
- """
- _comments = True
- _index = None
- lines = []
- with open(self.target_file, 'r') as header:
- lines = header.readlines()
- for line in lines:
- if Constants.BLOCKTOOL in line:
- _index = lines.index(line)
- return bool(_index)
-
-
-def validate_message_port(self, message_ports, suppress_input, suppress_output):
- """
- function to solve conflicts if any in the
- *message_port* comments and the implementation information
- """
- if message_ports['input'] != self.parsed_data['message_port']['input']:
- if not suppress_input:
- warnings.warn(
- 'Conflict in values input message port Id. Add ! at the start of the key-value line to mandatory use the comment value.')
- self.parsed_data['message_port']['input'] = message_ports['input']
- if message_ports['output'] != self.parsed_data['message_port']['output']:
- if not suppress_output:
- warnings.warn(
- 'Conflict in values output message port Id. Add ! at the start of the key-value line to mandatory use the comment value.')
- self.parsed_data['message_port']['output'] = message_ports['output']
-
-
-def read_comments(self):
- """
- function to read special blocktool comments
- in the public header
- """
- temp_parsed_data = {}
- if self.parsed_data['io_signature'] or self.parsed_data['message_port']:
- temp_parsed_data['io_signature'] = self.parsed_data['io_signature']
- temp_parsed_data['message_port'] = self.parsed_data['message_port']
- self.parsed_data['io_signature'] = {
- "input": {
- "signature": None
- },
- "output": {
- "signature": None
- }
- }
- self.parsed_data['message_port'] = {
- "input": [],
- "output": []
- }
- _suppress_input = False
- _suppress_output = False
- parsed_io = self.parsed_data['io_signature']
- message_port = self.parsed_data['message_port']
- special_comments = []
- _index = None
- lines = []
- with open(self.target_file, 'r') as header:
- lines = header.readlines()
- for line in lines:
- if Constants.BLOCKTOOL in line:
- _index = lines.index(line)
-
- if _index is not None:
- _index = _index+1
- for num in range(_index, len(lines)):
- if Constants.END_BLOCKTOOL in lines[num]:
- break
- special_comments.append(lines[num])
- for comment in special_comments:
- if Constants.INPUT_SIG in comment:
- parsed_io['input']['signature'] = strip_symbols(comment)
- if Constants.INPUT_MAX in comment:
- parsed_io['input']['max_streams'] = strip_symbols(comment)
- if Constants.INPUT_MIN in comment:
- parsed_io['input']['min_streams'] = strip_symbols(comment)
- if parsed_io['input']['signature'] is Constants.MAKE and not None:
- if Constants.INPUT_MAKE_SIZE in comment:
- parsed_io['input']['sizeof_stream_item'] = strip_symbols(
- comment)
- elif parsed_io['input']['signature'] is Constants.MAKE2 and not None:
- if Constants.INPUT_MAKE_SIZE1 in comment:
- parsed_io['input']['sizeof_stream_item1'] = strip_symbols(
- comment)
- if Constants.INPUT_MAKE_SIZE2 in comment:
- parsed_io['input']['sizeof_stream_item2'] = strip_symbols(
- comment)
- elif parsed_io['input']['signature'] is Constants.MAKE3 and not None:
- if Constants.INPUT_MAKE_SIZE1 in comment:
- parsed_io['input']['sizeof_stream_item1'] = strip_symbols(
- comment)
- if Constants.INPUT_MAKE_SIZE2 in comment:
- parsed_io['input']['sizeof_stream_item2'] = strip_symbols(
- comment)
- if Constants.INPUT_MAKE_SIZE3 in comment:
- parsed_io['input']['sizeof_stream_item3'] = strip_symbols(
- comment)
- elif parsed_io['input']['signature'] is Constants.MAKEV and not None:
- if Constants.INPUT_MAKEV_SIZE in comment:
- parsed_io['input']['sizeof_stream_items'] = strip_symbols(
- comment)
-
- if Constants.OUTPUT_SIG in comment:
- parsed_io['output']['signature'] = strip_symbols(comment)
- if Constants.OUTPUT_MAX in comment:
- parsed_io['output']['max_streams'] = strip_symbols(comment)
- if Constants.OUTPUT_MIN in comment:
- parsed_io['output']['min_streams'] = strip_symbols(comment)
- if parsed_io['output']['signature'] is Constants.MAKE and not None:
- if Constants.OUTPUT_MAKE_SIZE in comment:
- parsed_io['output']['sizeof_stream_item'] = strip_symbols(
- comment)
- elif parsed_io['output']['signature'] is Constants.MAKE2:
- if Constants.OUTPUT_MAKE_SIZE1 in comment:
- parsed_io['output']['sizeof_stream_item1'] = strip_symbols(
- comment)
- if Constants.OUTPUT_MAKE_SIZE2 in comment:
- parsed_io['output']['sizeof_stream_item2'] = strip_symbols(
- comment)
- elif parsed_io['output']['signature'] is Constants.MAKE3 and not None:
- if Constants.OUTPUT_MAKE_SIZE1 in comment:
- parsed_io['output']['sizeof_stream_item1'] = strip_symbols(
- comment)
- if Constants.OUTPUT_MAKE_SIZE2 in comment:
- parsed_io['output']['sizeof_stream_item2'] = strip_symbols(
- comment)
- if Constants.OUTPUT_MAKE_SIZE3 in comment:
- parsed_io['output']['sizeof_stream_item3'] = strip_symbols(
- comment)
- elif parsed_io['output']['signature'] is Constants.MAKEV and not None:
- if Constants.OUTPUT_MAKEV_SIZE in comment:
- parsed_io['output']['sizeof_stream_items'] = strip_symbols(
- comment)
-
- if Constants.INPUT_PORT in comment:
- if Constants.EXCLAMATION in comment:
- _suppress_input = True
- if strip_symbols(comment):
- message_port['input'] = strip_symbols(comment).split(', ')
- if Constants.OUTPUT_PORT in comment:
- if Constants.EXCLAMATION in comment:
- _suppress_output = True
- if strip_symbols(comment):
- message_port['output'] = strip_symbols(comment).split(', ')
- validate_message_port(
- self, temp_parsed_data['message_port'], _suppress_input, _suppress_output)
- self.parsed_data['io_signature'] = temp_parsed_data['io_signature']
-
-
-def add_comments(self):
- """
- function to add special blocktool comments
- in the public header
- """
- _index = None
- lines = []
- parsed_io = self.parsed_data['io_signature']
- message_port = self.parsed_data['message_port']
- with open(self.target_file, 'r') as header:
- lines = header.readlines()
- for line in lines:
- if Constants.BLOCKTOOL in line:
- _index = lines.index(line)
- if _index is None:
- with open(self.target_file, 'a') as header:
- header.write('\n')
- header.write('/* '+Constants.BLOCKTOOL + '\n')
- header.write('input_signature: ' +
- parsed_io['input']['signature'] + '\n')
- header.write('input_min_streams: ' +
- parsed_io['input']['min_streams'] + '\n')
- header.write('input_max_streams: ' +
- parsed_io['input']['max_streams'] + '\n')
- if parsed_io['input']['signature'] is Constants.MAKE:
- header.write('input_sizeof_stream_item: ' +
- parsed_io['input']['sizeof_stream_item'] + '\n')
- elif parsed_io['input']['signature'] is Constants.MAKE2:
- header.write('input_sizeof_stream_item1: ' +
- parsed_io['input']['sizeof_stream_item1'] + '\n')
- header.write('input_sizeof_stream_item2: ' +
- parsed_io['input']['sizeof_stream_item2'] + '\n')
- elif parsed_io['input']['signature'] is Constants.MAKE3:
- header.write('input_sizeof_stream_item1: ' +
- parsed_io['input']['sizeof_stream_item1'] + '\n')
- header.write('input_sizeof_stream_item2: ' +
- parsed_io['input']['sizeof_stream_item2'] + '\n')
- header.write('input_sizeof_stream_item3: ' +
- parsed_io['input']['sizeof_stream_item3'] + '\n')
- elif parsed_io['input']['signature'] is Constants.MAKEV:
- header.write('input_sizeof_stream_item: ' +
- parsed_io['input']['sizeof_stream_items'] + '\n')
- header.write('output_signature: ' +
- parsed_io['output']['signature'] + '\n')
- header.write('output_min_streams: ' +
- parsed_io['output']['min_streams'] + '\n')
- header.write('output_max_streams: ' +
- parsed_io['output']['max_streams'] + '\n')
- if parsed_io['output']['signature'] is Constants.MAKE:
- header.write('output_sizeof_stream_item: ' +
- parsed_io['output']['sizeof_stream_item'] + '\n')
- elif parsed_io['output']['signature'] is Constants.MAKE2:
- header.write('output_sizeof_stream_item1: ' +
- parsed_io['output']['sizeof_stream_item1'] + '\n')
- header.write('output_sizeof_stream_item2: ' +
- parsed_io['output']['sizeof_stream_item2'] + '\n')
- elif parsed_io['output']['signature'] is Constants.MAKE3:
- header.write('output_sizeof_stream_item1: ' +
- parsed_io['output']['sizeof_stream_item1'] + '\n')
- header.write('output_sizeof_stream_item2: ' +
- parsed_io['output']['sizeof_stream_item2'] + '\n')
- header.write('output_sizeof_stream_item3: ' +
- parsed_io['output']['sizeof_stream_item3'] + '\n')
- elif parsed_io['output']['signature'] is Constants.MAKEV:
- header.write('output_sizeof_stream_item: ' +
- parsed_io['output']['sizeof_stream_items'] + '\n')
-
- if message_port['input']:
- header.write('message_input: ' +
- ', '.join(message_port['input']) + '\n')
- else:
- header.write('message_input: ' + '\n')
- if message_port['output']:
- header.write('message_output: ' +
- ', '.join(message_port['output']) + '\n')
- else:
- header.write('message_output: ' + '\n')
- header.write(Constants.END_BLOCKTOOL + '*/' + '\n')
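To make the comment syntax concrete: for a block whose I/O signature is make(1, 1, sizeof(gr_complex)) on both sides and which has no message ports, add_comments() appends a block of roughly the following shape to the public header (shown as a Python string purely for illustration; the keys come from core/Constants.py and the values from the sample agc2_cc output further down).

    # Illustrative shape of the comment block written by add_comments()
    # and later consumed by read_comments().
    EXAMPLE_BLOCKTOOL_COMMENT = """\
    /* ! BlockTool
    input_signature: make
    input_min_streams: 1
    input_max_streams: 1
    input_sizeof_stream_item: sizeof(gr_complex)
    output_signature: make
    output_min_streams: 1
    output_max_streams: 1
    output_sizeof_stream_item: sizeof(gr_complex)
    message_input:
    message_output:
    EndTool !*/
    """
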
diff --git a/gr-utils/python/blocktool/core/iosignature.py b/gr-utils/python/blocktool/core/iosignature.py
deleted file mode 100644
index bf3f04d781..0000000000
--- a/gr-utils/python/blocktool/core/iosignature.py
+++ /dev/null
@@ -1,182 +0,0 @@
-#
-# Copyright 2019 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Module to get io_signature of the header block """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import re
-import itertools
-import logging
-import string
-
-from ..core import Constants
-
-LOGGER = logging.getLogger(__name__)
-
-
-def io_signature(impl_file):
- """
- function to generate the io_signature of the block
-    : returns the io parameters
- """
- parsed_io = {
- "input": {
- "signature": None
- },
- "output": {
- "signature": None
- }
- }
- with open(impl_file, 'r') as impl:
- io_lines = []
- for line in impl:
- if Constants.IO_SIGNATURE in line:
- io_lines.append(line)
- if len(io_lines) > 2:
- io_lines = io_lines[0:2]
- _io_sig = []
- for line in io_lines:
- if Constants.IO_SIGNATURE in line:
- line = line.lstrip().rstrip().split(Constants.IO_SIGNATURE)
- _io_sig.append(line)
- _io_sig = list(itertools.chain.from_iterable(_io_sig))
- for index, _element in enumerate(_io_sig):
- _io_sig[index] = _element.lstrip().rstrip()
- if all(i in string.punctuation for i in _element):
- _io_sig.remove(_element)
- _io_sig = list(filter(None, _io_sig))
- io_func = []
- for _io in _io_sig:
- if Constants.MAKE in _io:
- io_func.append(_io.lstrip().rstrip(Constants.STRIP_SYMBOLS))
- for signature in Constants.SIGNATURE_LIST:
- if signature in io_func[0] and parsed_io['input']['signature'] is None:
- parsed_io['input']['signature'] = signature
- io_func[0] = io_func[0].lstrip(signature+' (')
- if signature in io_func[1] and parsed_io['output']['signature'] is None:
- parsed_io['output']['signature'] = signature
- io_func[1] = io_func[1].lstrip(signature+' (')
- io_elements = []
- for _io in io_func:
- _io = _io.split(',')
- io_elements.append(_io)
- io_elements = list(itertools.chain.from_iterable(io_elements))
- for index, _io in enumerate(io_elements):
- _io = _io.lstrip(' (').rstrip(' )')
- if Constants.OPEN_BRACKET in _io:
- _io = _io + Constants.CLOSE_BRACKET
- io_elements[index] = _io
-
- # Because of any possible combination of I/O signature and different number
- # of arguments, manual if else loop is required
- if parsed_io['input']['signature'] is Constants.MAKE:
- parsed_io['input']['min_streams'] = io_elements[0]
- parsed_io['input']['max_streams'] = io_elements[1]
- parsed_io['input']['sizeof_stream_item'] = io_elements[2]
- del io_elements[0:3]
- elif parsed_io['input']['signature'] is Constants.MAKE2:
- parsed_io['input']['min_streams'] = io_elements[0]
- parsed_io['input']['max_streams'] = io_elements[1]
- parsed_io['input']['sizeof_stream_item1'] = io_elements[2]
- parsed_io['input']['sizeof_stream_item2'] = io_elements[3]
- del io_elements[0:4]
- elif parsed_io['input']['signature'] is Constants.MAKE3:
- parsed_io['input']['min_streams'] = io_elements[0]
- parsed_io['input']['max_streams'] = io_elements[1]
- parsed_io['input']['sizeof_stream_item1'] = io_elements[2]
- parsed_io['input']['sizeof_stream_item2'] = io_elements[3]
- parsed_io['input']['sizeof_stream_item3'] = io_elements[4]
- del io_elements[0:5]
- elif parsed_io['input']['signature'] is Constants.MAKEV:
- parsed_io['input']['min_streams'] = io_elements[0]
- parsed_io['input']['max_streams'] = io_elements[1]
- parsed_io['input']['sizeof_stream_items'] = io_elements[2]
- del io_elements[0:3]
-
- if parsed_io['output']['signature'] is Constants.MAKE:
- parsed_io['output']['min_streams'] = io_elements[0]
- parsed_io['output']['max_streams'] = io_elements[1]
- parsed_io['output']['sizeof_stream_item'] = io_elements[2]
- del io_elements[0:3]
- elif parsed_io['output']['signature'] is Constants.MAKE2:
- parsed_io['output']['min_streams'] = io_elements[0]
- parsed_io['output']['max_streams'] = io_elements[1]
- parsed_io['output']['sizeof_stream_item1'] = io_elements[2]
- parsed_io['output']['sizeof_stream_item2'] = io_elements[3]
- del io_elements[0:4]
- elif parsed_io['output']['signature'] is Constants.MAKE3:
- parsed_io['output']['min_streams'] = io_elements[0]
- parsed_io['output']['max_streams'] = io_elements[1]
- parsed_io['output']['sizeof_stream_item1'] = io_elements[2]
- parsed_io['output']['sizeof_stream_item2'] = io_elements[3]
- parsed_io['output']['sizeof_stream_item3'] = io_elements[4]
- del io_elements[0:5]
- elif parsed_io['output']['signature'] is Constants.MAKEV:
- parsed_io['output']['min_streams'] = io_elements[0]
- parsed_io['output']['max_streams'] = io_elements[1]
- parsed_io['output']['sizeof_stream_items'] = io_elements[2]
- del io_elements[0:3]
- return parsed_io
-
-
-def message_port(impl_file):
- """
- parses message ports from implementation file
- """
- parsed_message_port = {
- "input": [],
- "output": []
- }
- with open(impl_file, 'r') as impl:
- _input = []
- _output = []
- for line in impl:
- if Constants.MESSAGE_INPUT in line:
- _input.append(line)
- if Constants.MESSAGE_OUTPUT in line:
- _output.append(line)
-
- input_port = []
- output_port = []
- if _input:
- for port in _input:
- port = port.lstrip().rstrip().strip(Constants.MESSAGE_INPUT)
- pattern = port.find('\"')
- if pattern != -1:
- if re.findall(r'"([^"]*)"', port)[0]:
- input_port.append(re.findall(r'"([^"]*)"', port)[0])
- else:
- input_port.append(port[port.find('(')+1:port.rfind(')')])
- _temp_port = ''.join(map(str, input_port))
- input_port.clear()
- input_port.append(_temp_port)
-
- if _output:
- for port in _output:
- port = port.lstrip().rstrip().strip(Constants.MESSAGE_OUTPUT)
- pattern = port.find('\"')
- if pattern != -1:
- if re.findall(r'"([^"]*)"', port)[0]:
- output_port.append(re.findall(r'"([^"]*)"', port)[0])
- else:
- output_port.append(port[port.find('(')+1:port.rfind(')')])
- _temp_port = ''.join(map(str, output_port))
- output_port.clear()
- output_port.append(_temp_port)
-
- if input_port:
- for port in input_port:
- parsed_message_port['input'].append(port)
-
- if output_port:
- for port in output_port:
- parsed_message_port['output'].append(port)
- return parsed_message_port
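As a concrete reference for what io_signature() extracts: for an implementation that calls gr::io_signature::make(1, 1, sizeof(gr_complex)) for both input and output (the agc2_cc sample further down), the returned dict is roughly the following.

    # Sketch of io_signature()'s return value for make(1, 1, sizeof(gr_complex))
    # on both ports; compare with tests/sample_json/analog_agc2_cc.json below.
    expected_io = {
        "input":  {"signature": "make", "min_streams": "1", "max_streams": "1",
                   "sizeof_stream_item": "sizeof(gr_complex)"},
        "output": {"signature": "make", "min_streams": "1", "max_streams": "1",
                   "sizeof_stream_item": "sizeof(gr_complex)"},
    }
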
diff --git a/gr-utils/python/blocktool/core/outputschema.py b/gr-utils/python/blocktool/core/outputschema.py
deleted file mode 100644
index 4d1bcf88b7..0000000000
--- a/gr-utils/python/blocktool/core/outputschema.py
+++ /dev/null
@@ -1,157 +0,0 @@
-#
-# Copyright 2019 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Schema to be strictly followed be parsed header output """
-
-
-RESULT_SCHEMA = {
- "title": "JSON SCHEMA TO BE FOLLOWED BY BLOCK HEADER PARSING TOOL",
- "description": "Schema designed for the header file parsed python dict output",
- "type": "object",
- "properties": {
- "namespace": {
-        "description": "List of nested namespaces",
- "type": "array",
- "minItems": 1,
- "uniqueItems": True,
- "items": {
- "type": "string",
- "minLength": 1
- }
- },
- "class": {
- "description": "Class name",
- "type": "string",
- "minLength": 1
- },
- "io_signature": {
- "description": "I/O signature",
- "type": "object",
- "properties": {
- "input": {
- "description": "Input ports",
- "type": "object"
- },
- "output": {
- "description": "Output ports",
- "type": "object"
- }
- },
- "required": ["input", "output"]
- },
- "make": {
- "description": "Make function",
- "type": "object",
- "properties": {
- "arguments": {
- "description": "Arguments of make function",
- "type": "array",
- "minItems": 1,
- "uniqueItems": True,
- "items": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string",
- "minLength": 1
- },
- "dtype": {
- "type": "string",
- "minLength": 1
- },
- "default": {
- "type": "string"
- }
- },
- "required": ["name"],
- "dependencies": {
- "name": [
- "dtype",
- "default"
- ]
- }
- }
- }
- }
- },
- "methods": {
- "description": "Setters",
- "type": "array",
- "minItems": 0,
- "items": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string",
- "minLength": 1
- },
- "arguments_type": {
- "type": "array",
- "uniqueItems": True,
- "properties": {
- "name": {
- "type": "string",
- "minLength": 1
- },
- "dtype": {
- "type": "string",
- "minLength": 1
- }
- },
- "required": ["name"],
- "dependencies": {
- "name": ["dtype"]
- }
- }
- },
- "required": ["name"]
- }
- },
- "properties": {
- "description": "Getters",
- "type": "array",
- "uniqueItems": True,
- "items": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string",
- "minLength": 1
- },
- "dtype": {
- "type": "string",
- "minLength": 1
- },
- "read_only": {
- "type": "boolean"
- }
- },
- "required": ["name"],
- "dependencies": {
- "name": [
- "dtype",
- "read_only"
- ]
- }
- }
- },
- "docstring": {
- "description": "documentation of the header file",
- "type": "array"
- }
- },
- "required": [
- "namespace",
- "class",
- "io_signature",
- "make",
- "methods",
- "properties",
- "docstring"
- ]
-}
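RESULT_SCHEMA is meant to be enforced with jsonschema, exactly as the removed test_blocktool.py does; a short sketch (hypothetical header path, import path as above):

    # Sketch: validate a parsed header against the schema, as the unit tests do.
    from jsonschema import validate
    from blocktool import BlockHeaderParser, RESULT_SCHEMA  # assumption: importable as `blocktool`

    parsed = BlockHeaderParser(
        file_path='gr-analog/include/gnuradio/analog/agc2_cc.h'  # hypothetical path
    ).get_header_info()
    validate(instance=parsed, schema=RESULT_SCHEMA)  # raises ValidationError on mismatch
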
diff --git a/gr-utils/python/blocktool/core/parseheader.py b/gr-utils/python/blocktool/core/parseheader.py
deleted file mode 100644
index 59d4ac33f3..0000000000
--- a/gr-utils/python/blocktool/core/parseheader.py
+++ /dev/null
@@ -1,271 +0,0 @@
-#
-# Copyright 2019 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Module to generate AST for the headers and parse it """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import os
-import re
-import codecs
-import logging
-
-from pygccxml import parser, declarations, utils
-
-from ..core.base import BlockToolException, BlockTool
-from ..core.iosignature import io_signature, message_port
-from ..core.comments import read_comments, add_comments, exist_comments
-from ..core import Constants
-
-LOGGER = logging.getLogger(__name__)
-
-
-class BlockHeaderParser(BlockTool):
- """
- : Single argument required: file_path
-    file_path: enter the path to the header block in any GNU Radio module
- : returns the parsed header data in python dict
- : return dict keys: namespace, class, io_signature, make,
- properties, methods
-    : Can be used as a CLI command or an external API
- """
- name = 'Block Parse Header'
- description = 'Create a parsed output from a block header file'
-
- def __init__(self, file_path=None, blocktool_comments=False, include_paths=None, **kwargs):
- """ __init__ """
- BlockTool.__init__(self, **kwargs)
- self.parsed_data = {}
- self.addcomments = blocktool_comments
- self.include_paths = None
- if (include_paths):
- self.include_paths = [p.strip() for p in include_paths.split(',')]
- if not os.path.isfile(file_path):
- raise BlockToolException('file does not exist')
- file_path = os.path.abspath(file_path)
- self.target_file = file_path
- self.initialize()
- self.validate()
-
- def initialize(self):
- """
- initialize all the required API variables
- """
- self.module = self.target_file
- for dirs in self.module:
- if not os.path.basename(self.module).startswith(Constants.GR):
- self.module = os.path.abspath(
- os.path.join(self.module, os.pardir))
- self.modname = os.path.basename(self.module)
- self.filename = os.path.basename(self.target_file)
- self.targetdir = os.path.dirname(self.target_file)
- for dirs in os.scandir(self.module):
- if dirs.is_dir():
- if dirs.path.endswith('lib'):
- self.impldir = dirs.path
- self.impl_file = os.path.join(self.impldir,
- self.filename.split('.')[0]+'_impl.cc')
-
- def validate(self):
- """ Override the Blocktool validate function """
- BlockTool._validate(self)
- if not self.filename.endswith('.h'):
- raise BlockToolException(
- 'Cannot parse a non-header file')
-
- def get_header_info(self):
- """
- PyGCCXML header code parser
- magic happens here!
- : returns the parsed header data in python dict
- : return dict keys: namespace, class, io_signature, make,
- properties, methods
-        : Can be used as a CLI command or an external API
- """
- gr = self.modname.split('-')[0]
- module = self.modname.split('-')[-1]
- generator_path, generator_name = utils.find_xml_generator()
- xml_generator_config = parser.xml_generator_configuration_t(
- xml_generator_path=generator_path,
- xml_generator=generator_name,
- include_paths=self.include_paths,
- compiler='gcc',
- define_symbols=['BOOST_ATOMIC_DETAIL_EXTRA_BACKEND_GENERIC'],
- cflags='-std=c++11')
- decls = parser.parse(
- [self.target_file], xml_generator_config)
- global_namespace = declarations.get_global_namespace(decls)
-
- # namespace
- try:
- self.parsed_data['namespace'] = []
- ns = global_namespace.namespace(gr)
- if ns is None:
- raise BlockToolException
- main_namespace = ns.namespace(module)
- if main_namespace is None:
- raise BlockToolException('namespace cannot be none')
- self.parsed_data['namespace'] = [gr, module]
- if main_namespace.declarations:
- for _namespace in main_namespace.declarations:
- if isinstance(_namespace, declarations.namespace_t):
- if Constants.KERNEL not in str(_namespace):
- main_namespace = _namespace
- self.parsed_data['namespace'].append(
- str(_namespace).split('::')[-1].split(' ')[0])
- except RuntimeError:
- raise BlockToolException(
- 'Invalid namespace format in the block header file')
-
- # class
- try:
- self.parsed_data['class'] = ''
- for _class in main_namespace.declarations:
- if isinstance(_class, declarations.class_t):
- main_class = _class
- self.parsed_data['class'] = str(_class).split('::')[
- 2].split(' ')[0]
- except RuntimeError:
- raise BlockToolException(
- 'Block header namespace {} must consist of a valid class instance'.format(module))
-
- # io_signature, message_ports
- self.parsed_data['io_signature'] = {}
- self.parsed_data['message_port'] = {}
- if os.path.isfile(self.impl_file) and exist_comments(self):
- self.parsed_data['io_signature'] = io_signature(
- self.impl_file)
- self.parsed_data['message_port'] = message_port(
- self.impl_file)
- read_comments(self)
- elif os.path.isfile(self.impl_file) and not exist_comments(self):
- self.parsed_data['io_signature'] = io_signature(
- self.impl_file)
- self.parsed_data['message_port'] = message_port(
- self.impl_file)
- if self.addcomments:
- add_comments(self)
- elif not os.path.isfile(self.impl_file) and exist_comments(self):
- read_comments(self)
- else:
- self.parsed_data['io_signature'] = {
- "input": [],
- "output": []
- }
- self.parsed_data['message_port'] = self.parsed_data['io_signature']
-
- # make
- try:
- self.parsed_data['make'] = {}
- self.parsed_data['make']['arguments'] = []
- query_m = declarations.custom_matcher_t(
- lambda mem_fun: mem_fun.name.startswith('make'))
- query_make = query_m & declarations.access_type_matcher_t('public')
- make_func = main_class.member_functions(function=query_make,
- allow_empty=True,
- header_file=self.target_file)
- criteria = declarations.calldef_matcher(name='make')
- _make_fun = declarations.matcher.get_single(criteria, main_class)
- _make_fun = str(_make_fun).split(
- 'make')[-1].split(')')[0].split('(')[1].lstrip().rstrip().split(',')
- if make_func:
- for arg in make_func[0].arguments:
- for _arg in _make_fun:
- if str(arg.name) in _arg:
- make_arguments = {
- "name": str(arg.name),
- "dtype": str(arg.decl_type),
- "default": ""
- }
- if re.findall(r'[-+]?\d*\.\d+|\d+', _arg):
- make_arguments['default'] = re.findall(
- r'[-+]?\d*\.\d+|\d+', _arg)[0]
- elif re.findall(r'\"(.+?)\"', _arg):
- make_arguments['default'] = re.findall(
- r'\"(.+?)\"', _arg)[0]
- elif "true" in _arg:
- make_arguments['default'] = "True"
- elif "false" in _arg:
- make_arguments['default'] = "False"
- self.parsed_data['make']['arguments'].append(
- make_arguments.copy())
- except RuntimeError:
- self.parsed_data['make'] = {}
- self.parsed_data['make']['arguments'] = []
-
- # setters
- try:
- self.parsed_data['methods'] = []
- query_methods = declarations.access_type_matcher_t('public')
- setters = main_class.member_functions(function=query_methods,
- allow_empty=True,
- header_file=self.target_file)
- getter_arguments = []
- if setters:
- for setter in setters:
- if str(setter.name).startswith('set_') and setter.arguments:
- setter_args = {
- "name": str(setter.name),
- "arguments_type": []
- }
- for argument in setter.arguments:
- args = {
- "name": str(argument.name),
- "dtype": str(argument.decl_type)
- }
- getter_arguments.append(args['name'])
- setter_args['arguments_type'].append(args.copy())
- self.parsed_data['methods'].append(setter_args.copy())
- except RuntimeError:
- self.parsed_data['methods'] = []
-
- # getters
- try:
- self.parsed_data['properties'] = []
- query_properties = declarations.access_type_matcher_t('public')
- getters = main_class.member_functions(function=query_properties,
- allow_empty=True,
- header_file=self.target_file)
- if getters:
- for getter in getters:
- if not getter.arguments or getter.has_const:
- getter_args = {
- "name": str(getter.name),
- "dtype": str(getter.return_type),
- "read_only": True
- }
- if getter_args['name'] in getter_arguments:
- getter_args["read_only"] = False
- self.parsed_data['properties'].append(
- getter_args.copy())
- except RuntimeError:
- self.parsed_data['properties'] = []
-
- # documentation
- try:
- _index = None
- header_file = codecs.open(self.target_file, 'r', 'cp932')
- self.parsed_data['docstring'] = re.compile(
- r'//.*?$|/\*.*?\*/', re.DOTALL | re.MULTILINE).findall(
- header_file.read())[2:]
- header_file.close()
- for doc in self.parsed_data['docstring']:
- if Constants.BLOCKTOOL in doc:
- _index = self.parsed_data['docstring'].index(doc)
- if _index is not None:
- self.parsed_data['docstring'] = self.parsed_data['docstring'][: _index]
- except:
- self.parsed_data['docstring'] = []
-
- return self.parsed_data
-
- def run_blocktool(self):
- """ Run, run, run. """
- self.get_header_info()
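The --yaml branch of cli.py hands a populated BlockHeaderParser to modtool's yaml_generator(); mirroring that call site, a programmatic equivalent looks roughly like this (keyword names copied from the CLI options; treat the exact yaml_generator() arguments as an assumption based on the code above).

    # Sketch: produce a GRC YAML file the way cli.py's --yaml branch does.
    from gnuradio.modtool.core import yaml_generator
    from blocktool import BlockHeaderParser  # assumption: importable as `blocktool`

    kwargs = dict(file_path='gr-analog/include/gnuradio/analog/agc2_cc.h',  # hypothetical
                  yaml=True, blocktool_comments=False, output=True,
                  include_paths=None, modtool=False)
    parser = BlockHeaderParser(**kwargs)
    parser.run_blocktool()
    parser.yaml = True
    yaml_generator(parser, **kwargs)  # writes/prints the YAML, as in cli.py
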
diff --git a/gr-utils/python/blocktool/tests/README.blocktool_test b/gr-utils/python/blocktool/tests/README.blocktool_test
deleted file mode 100644
index 1ca66e9c6e..0000000000
--- a/gr-utils/python/blocktool/tests/README.blocktool_test
+++ /dev/null
@@ -1,12 +0,0 @@
-gr_blocktool: Block Header parsing tool.
-Parses GNU Radio header files to generate YAML or JSON output.
-
-This directory consists of tests for the parsed header output and Blocktool exceptions.
-============================================================================================
-- Schema defined in the test will be strictly followed for every parsed JSON output file.
-
-Two sample responses for header files are available in this directory:
-=====================================================================
-
-- sample_agc2_cc.json for the public header file agc2_cc.h from the gr-analog directory.
-- sample_additive_scrambler_bb.json for the public header file additive_scrambler_bb.h from the gr-digital directory.
\ No newline at end of file
diff --git a/gr-utils/python/blocktool/tests/sample_json/analog_agc2_cc.json b/gr-utils/python/blocktool/tests/sample_json/analog_agc2_cc.json
deleted file mode 100644
index fa9eae3551..0000000000
--- a/gr-utils/python/blocktool/tests/sample_json/analog_agc2_cc.json
+++ /dev/null
@@ -1,131 +0,0 @@
-{
- "namespace": [
- "gr",
- "analog"
- ],
- "class": "agc2_cc",
- "io_signature": {
- "input": {
- "signature": "make",
- "min_streams": "1",
- "max_streams": "1",
- "sizeof_stream_item": "sizeof(gr_complex)"
- },
- "output": {
- "signature": "make",
- "min_streams": "1",
- "max_streams": "1",
- "sizeof_stream_item": "sizeof(gr_complex)"
- }
- },
- "message_port": {
- "input": [],
- "output": []
- },
- "make": {
- "arguments": [
- {
- "name": "attack_rate",
- "dtype": "float",
- "default": "0.10000000000000001"
- },
- {
- "name": "decay_rate",
- "dtype": "float",
- "default": "0.01"
- },
- {
- "name": "reference",
- "dtype": "float",
- "default": "1"
- },
- {
- "name": "gain",
- "dtype": "float",
- "default": "1"
- }
- ]
- },
- "methods": [
- {
- "name": "set_attack_rate",
- "arguments_type": [
- {
- "name": "rate",
- "dtype": "float"
- }
- ]
- },
- {
- "name": "set_decay_rate",
- "arguments_type": [
- {
- "name": "rate",
- "dtype": "float"
- }
- ]
- },
- {
- "name": "set_reference",
- "arguments_type": [
- {
- "name": "reference",
- "dtype": "float"
- }
- ]
- },
- {
- "name": "set_gain",
- "arguments_type": [
- {
- "name": "gain",
- "dtype": "float"
- }
- ]
- },
- {
- "name": "set_max_gain",
- "arguments_type": [
- {
- "name": "max_gain",
- "dtype": "float"
- }
- ]
- }
- ],
- "properties": [
- {
- "name": "attack_rate",
- "dtype": "float",
- "read_only": true
- },
- {
- "name": "decay_rate",
- "dtype": "float",
- "read_only": true
- },
- {
- "name": "reference",
- "dtype": "float",
- "read_only": false
- },
- {
- "name": "gain",
- "dtype": "float",
- "read_only": false
- },
- {
- "name": "max_gain",
- "dtype": "float",
- "read_only": false
- }
- ],
- "docstring": [
- "/*!\n * \\brief high performance Automatic Gain Control class with\n * attack and decay rates.\n * \\ingroup level_controllers_blk\n *\n * \\details\n * For Power the absolute value of the complex number is used.\n */",
- "// gr::analog::agc2_cc::sptr",
- "/*!\n * Build a complex value AGC loop block with attack and decay rates.\n *\n * \\param attack_rate the update rate of the loop when in attack mode.\n * \\param decay_rate the update rate of the loop when in decay mode.\n * \\param reference reference value to adjust signal power to.\n * \\param gain initial gain value.\n */",
- "/* namespace analog */",
- "/* namespace gr */",
- "/* INCLUDED_ANALOG_AGC2_CC_H */"
- ]
-}
\ No newline at end of file
diff --git a/gr-utils/python/blocktool/tests/sample_json/digital_additive_scrambler_bb.json b/gr-utils/python/blocktool/tests/sample_json/digital_additive_scrambler_bb.json
deleted file mode 100644
index 265b5a6ddc..0000000000
--- a/gr-utils/python/blocktool/tests/sample_json/digital_additive_scrambler_bb.json
+++ /dev/null
@@ -1,95 +0,0 @@
-{
- "namespace": [
- "gr",
- "digital"
- ],
- "class": "additive_scrambler_bb",
- "io_signature": {
- "input": {
- "signature": "make",
- "min_streams": "1",
- "max_streams": "1",
- "sizeof_stream_item": "sizeof(unsigned char)"
- },
- "output": {
- "signature": "make",
- "min_streams": "1",
- "max_streams": "1",
- "sizeof_stream_item": "sizeof(unsigned char)"
- }
- },
- "message_port": {
- "input": [],
- "output": []
- },
- "make": {
- "arguments": [
- {
- "name": "mask",
- "dtype": "int",
- "default": ""
- },
- {
- "name": "seed",
- "dtype": "int",
- "default": ""
- },
- {
- "name": "len",
- "dtype": "int",
- "default": ""
- },
- {
- "name": "count",
- "dtype": "int",
- "default": "0"
- },
- {
- "name": "bits_per_byte",
- "dtype": "int",
- "default": "1"
- },
- {
- "name": "reset_tag_key",
- "dtype": "std::string const &",
- "default": ""
- }
- ]
- },
- "methods": [],
- "properties": [
- {
- "name": "mask",
- "dtype": "int",
- "read_only": true
- },
- {
- "name": "seed",
- "dtype": "int",
- "read_only": true
- },
- {
- "name": "len",
- "dtype": "int",
- "read_only": true
- },
- {
- "name": "count",
- "dtype": "int",
- "read_only": true
- },
- {
- "name": "bits_per_byte",
- "dtype": "int",
- "read_only": true
- }
- ],
- "docstring": [
- "/*!\n * \\ingroup coding_blk\n *\n * \\brief\n * Scramble an input stream using an LFSR.\n *\n * \\details\n * This block scrambles up to 8 bits per byte of the input\n * data stream, starting at the LSB.\n *\n * The scrambler works by XORing the incoming bit stream by the\n * output of the LFSR. Optionally, after \\p count bits have been\n * processed, the shift register is reset to the \\p seed value.\n * This allows processing fixed length vectors of samples.\n *\n * Alternatively, the LFSR can be reset using a reset tag to\n * scramble variable length vectors. However, it cannot be reset\n * between bytes.\n *\n * For details on configuring the LFSR, see gr::digital::lfsr.\n */",
- "// gr::digital::additive_scrambler_bb::sptr",
- "/*!\n * \\brief Create additive scrambler.\n *\n * \\param mask Polynomial mask for LFSR\n * \\param seed Initial shift register contents\n * \\param len Shift register length\n * \\param count Number of bytes after which shift register is reset, 0=never\n * \\param bits_per_byte Number of bits per byte\n * \\param reset_tag_key When a tag with this key is detected, the shift register is reset (when this is set, count is ignored!)\n */",
- "/* namespace digital */",
- "/* namespace gr */",
- "/* INCLUDED_DIGITAL_ADDITIVE_SCRAMBLER_BB_H */"
- ]
-}
\ No newline at end of file
diff --git a/gr-utils/python/blocktool/tests/sample_yaml/analog_agc2_cc.yml b/gr-utils/python/blocktool/tests/sample_yaml/analog_agc2_cc.yml
deleted file mode 100644
index fef19eff95..0000000000
--- a/gr-utils/python/blocktool/tests/sample_yaml/analog_agc2_cc.yml
+++ /dev/null
@@ -1,67 +0,0 @@
-id: analog_agc2_cc
-label: AGC2
-category: '[Analog]'
-flags: '[python, cpp]'
-templates:
- imports: from gnuradio import analog
- make: analog.agc2_cc(${attack_rate}, ${decay_rate}, ${reference}, ${gain}, ${max_gain})
- callbacks: !!python/tuple
- - set_attack_rate(${rate})
- - set_decay_rate(${rate})
- - set_reference(${reference})
- - set_gain(${gain})
- - set_max_gain(${max_gain})
-parameters:
-- id: attack_rate
- label: Attack_rate
- dtype: float
- read_only: true
-- id: decay_rate
- label: Decay_rate
- dtype: float
- read_only: true
-- id: reference
- label: Reference
- dtype: float
- read_only: false
-- id: gain
- label: Gain
- dtype: float
- read_only: false
-- id: max_gain
- label: Max_gain
- dtype: float
- read_only: false
-inputs:
-- domain: stream
- dtype: sizeof(gr_complex)
-outputs:
-- domain: stream
- dtype: sizeof(gr_complex)
-cpp_templates:
- includes: '#include <gnuradio/analog/agc2_cc.h>'
- declartions: analog::agc2_cc::sptr ${id}
- make: this->${id} = analog::agc2_cc::make(${attack_rate}, ${decay_rate}, ${reference},
- ${gain}, ${max_gain})
- callbacks: !!python/tuple
- - set_attack_rate(${rate})
- - set_decay_rate(${rate})
- - set_reference(${reference})
- - set_gain(${gain})
- - set_max_gain(${max_gain})
- link: gnuradio-analog
-documentation:
-- "/*!\n * \\brief high performance Automatic Gain Control class with\n *
- attack and decay rates.\n * \\ingroup level_controllers_blk\n *\n *
- \\details\n * For Power the absolute value of the complex number is used.\n
- \ */"
-- // gr::analog::agc2_cc::sptr
-- "/*!\n * Build a complex value AGC loop block with attack and decay rates.\n
- \ *\n * \\param attack_rate the update rate of the loop when in attack
- mode.\n * \\param decay_rate the update rate of the loop when in decay mode.\n
- \ * \\param reference reference value to adjust signal power to.\n *
- \\param gain initial gain value.\n */"
-- /* namespace analog */
-- /* namespace gr */
-- /* INCLUDED_ANALOG_AGC2_CC_H */
-file_format: 1
diff --git a/gr-utils/python/blocktool/tests/sample_yaml/digital_additive_scrambler_bb.yml b/gr-utils/python/blocktool/tests/sample_yaml/digital_additive_scrambler_bb.yml
deleted file mode 100644
index 0001653273..0000000000
--- a/gr-utils/python/blocktool/tests/sample_yaml/digital_additive_scrambler_bb.yml
+++ /dev/null
@@ -1,63 +0,0 @@
-id: digital_additive_scrambler_bb
-label: ADDITIVE SCRAMBLER
-category: '[Digital]'
-flags: '[python, cpp]'
-templates:
- imports: from gnuradio import digital
- make: digital.additive_scrambler_bb(${mask}, ${seed}, ${len}, ${count}, ${bits_per_byte})
-parameters:
-- id: mask
- label: Mask
- dtype: int
- read_only: true
-- id: seed
- label: Seed
- dtype: int
- read_only: true
-- id: len
- label: Len
- dtype: int
- read_only: true
-- id: count
- label: Count
- dtype: int
- read_only: true
-- id: bits_per_byte
- label: Bits_per_byte
- dtype: int
- read_only: true
-inputs:
-- domain: stream
- dtype: sizeof(unsigned char)
-outputs:
-- domain: stream
- dtype: sizeof(unsigned char)
-cpp_templates:
- includes: '#include <gnuradio/digital/additive_scrambler_bb.h>'
- declartions: digital::additive_scrambler_bb::sptr ${id}
- make: this->${id} = digital::additive_scrambler_bb::make(${mask}, ${seed}, ${len},
- ${count}, ${bits_per_byte})
- link: gnuradio-digital
-documentation:
-- "/*!\n * \\ingroup coding_blk\n *\n * \\brief\n * Scramble an input
- stream using an LFSR.\n *\n * \\details\n * This block scrambles up
- to 8 bits per byte of the input\n * data stream, starting at the LSB.\n *\n
- \ * The scrambler works by XORing the incoming bit stream by the\n * output
- of the LFSR. Optionally, after \\p count bits have been\n * processed, the shift
- register is reset to the \\p seed value.\n * This allows processing fixed length
- vectors of samples.\n *\n * Alternatively, the LFSR can be reset using a
- reset tag to\n * scramble variable length vectors. However, it cannot be reset\n
- \ * between bytes.\n *\n * For details on configuring the LFSR, see gr::digital::lfsr.\n
- \ */"
-- // gr::digital::additive_scrambler_bb::sptr
-- "/*!\n * \\brief Create additive scrambler.\n *\n * \\param mask
- \ Polynomial mask for LFSR\n * \\param seed Initial shift register contents\n
- \ * \\param len Shift register length\n * \\param count Number of
- bytes after which shift register is reset, 0=never\n * \\param bits_per_byte
- Number of bits per byte\n * \\param reset_tag_key When a tag with this key
- is detected, the shift register is reset (when this is set, count is ignored!)\n
- \ */"
-- /* namespace digital */
-- /* namespace gr */
-- /* INCLUDED_DIGITAL_ADDITIVE_SCRAMBLER_BB_H */
-file_format: 1
diff --git a/gr-utils/python/blocktool/tests/test_blocktool.py b/gr-utils/python/blocktool/tests/test_blocktool.py
deleted file mode 100644
index 8c8a1686b1..0000000000
--- a/gr-utils/python/blocktool/tests/test_blocktool.py
+++ /dev/null
@@ -1,187 +0,0 @@
-#
-# Copyright 2019 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" unittest for gr-blocktool api """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import os
-import unittest
-import warnings
-try:
- import pygccxml
- SKIP_BLOCK_TEST = False
-except ImportError:
- SKIP_BLOCK_TEST = True
-
-try:
- import apt
- CACHE = apt.cache.Cache()
- CACHE.open()
-
- PKG = CACHE['castxml']
- if PKG.is_installed:
- SKIP_BLOCK_TEST = False
- else:
- SKIP_BLOCK_TEST = True
-except Exception:
- SKIP_BLOCK_TEST = True
-
-from jsonschema import validate
-
-from blocktool import BlockHeaderParser
-from blocktool.core.base import BlockToolException
-from blocktool.core import Constants
-from blocktool import RESULT_SCHEMA
-
-
-class TestBlocktoolCore(unittest.TestCase):
- """ The Tests for blocktool core """
-
- def __init__(self, *args, **kwargs):
- super(TestBlocktoolCore, self).__init__(*args, **kwargs)
- self.module = os.path.abspath(os.path.join(os.path.dirname(__file__),
- '../../../../gr-analog'))
- self.test_dir = os.path.abspath(os.path.join(self.module,
- 'include/gnuradio/analog'))
-
- def is_int(self, number):
- """
- Check for int conversion
- """
- try:
- int(number)
- return True
- except ValueError:
- return False
-
- @classmethod
- def setUpClass(cls):
- """ create a temporary Blocktool object """
- try:
- warnings.simplefilter("ignore", ResourceWarning)
- except NameError:
- pass
- test_path = {}
- target_file = os.path.abspath(os.path.join(os.path.dirname(
- __file__), '../../../../gr-analog/include/gnuradio/analog', 'agc2_cc.h'))
- test_path['file_path'] = target_file
- cls.test_obj = BlockHeaderParser(**test_path).get_header_info()
-
- @unittest.skipIf(SKIP_BLOCK_TEST, 'pygccxml not found, skipping this unittest')
- def test_blocktool_exceptions(self):
- """
- tests for blocktool exceptions
- """
- # test for non-existent header or invalid headers
- test_dict = {}
- test_dict['file_path'] = os.path.abspath(
- os.path.join(self.test_dir, 'sample.h'))
- with self.assertRaises(BlockToolException):
- BlockHeaderParser(**test_dict).run_blocktool()
- # test for invalid header file
- test_dict['file_path'] = os.path.abspath(
- os.path.join(self.test_dir, 'CMakeLists.txt'))
- if not os.path.basename(test_dict['file_path']).endswith('.h'):
- with self.assertRaises(BlockToolException):
- BlockHeaderParser(**test_dict).run_blocktool()
-
- @unittest.skipIf(SKIP_BLOCK_TEST, 'pygccxml not found, skipping this unittest')
- def test_namespace(self):
- """ test for header namespace """
- module_name = os.path.basename(self.module)
- self.assertTrue(self.test_obj['namespace'][0] == 'gr')
- self.assertTrue(self.test_obj['namespace']
- [1] == module_name.split('-')[-1])
-
- @unittest.skipIf(SKIP_BLOCK_TEST, 'pygccxml not found, skipping this unittest')
- def test_io_signature(self):
- """ test for io_signature """
- input_signature = self.test_obj['io_signature']['input']['signature']
- output_signature = self.test_obj['io_signature']['output']['signature']
- valid_signature = False
- if input_signature in Constants.SIGNATURE_LIST and output_signature in Constants.SIGNATURE_LIST:
- valid_signature = True
- self.assertTrue(valid_signature)
- valid_io_stream = False
- input_max = self.test_obj['io_signature']['input']['max_streams']
- input_min = self.test_obj['io_signature']['input']['min_streams']
- output_max = self.test_obj['io_signature']['output']['max_streams']
- output_min = self.test_obj['io_signature']['output']['min_streams']
- if self.is_int(input_max) and self.is_int(input_min) and self.is_int(output_max) and self.is_int(output_min):
- valid_io_stream = True
- self.assertTrue(valid_io_stream)
-
- @unittest.skipIf(SKIP_BLOCK_TEST, 'pygccxml not found, skipping this unittest')
- def test_message_port(self):
- """ test for message ports """
- input_port = self.test_obj['message_port']['input']
- output_port = self.test_obj['message_port']['output']
- valid_input_message_port = True
- valid_output_message_port = True
- if input_port:
- for port in input_port:
- if not port['id']:
- valid_input_message_port = False
- if output_port:
- for port in output_port:
- if not port['id']:
- valid_output_message_port = False
- self.assertTrue(valid_input_message_port)
- self.assertTrue(valid_output_message_port)
-
- @unittest.skipIf(SKIP_BLOCK_TEST, 'pygccxml not found, skipping this unittest')
- def test_factory_signature(self):
- """ test for factory signature in the header """
- valid_factory_arg = True
- if self.test_obj['make']['arguments']:
- for arguments in self.test_obj['make']['arguments']:
- if not arguments['name'] or not arguments['dtype']:
- valid_factory_arg = False
- self.assertTrue(valid_factory_arg)
-
- @unittest.skipIf(SKIP_BLOCK_TEST, 'pygccxml not found, skipping this unittest')
- def test_methods(self):
- """ test for methods """
- valid_method = True
- if self.test_obj['methods']:
- for arguments in self.test_obj['methods']:
- if not arguments['name']:
- valid_method = False
- if arguments['arguments_type']:
- for args in arguments['arguments_type']:
- if not args['name'] or not args['dtype']:
- valid_method = False
- self.assertTrue(valid_method)
-
- @unittest.skipIf(SKIP_BLOCK_TEST, 'pygccxml not found, skipping this unittest')
- def test_properties(self):
- """ test for properties """
- valid_properties = True
- if self.test_obj['properties']:
- for arguments in self.test_obj['properties']:
- if not arguments['name'] or not arguments['dtype']:
- valid_properties = False
- self.assertTrue(valid_properties)
-
- @unittest.skipIf(SKIP_BLOCK_TEST, 'pygccxml not found, skipping this unittest')
- def test_result_format(self):
- """ test for parsed blocktool output format """
- valid_schema = False
- try:
- validate(instance=self.test_obj, schema=RESULT_SCHEMA)
- valid_schema = True
- except BlockToolException:
- raise BlockToolException
- self.assertTrue(valid_schema)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/gr-utils/python/blocktool/tests/test_json_file.py b/gr-utils/python/blocktool/tests/test_json_file.py
deleted file mode 100644
index 7728e8b803..0000000000
--- a/gr-utils/python/blocktool/tests/test_json_file.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#
-# Copyright 2019 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" testing the JSON files generated by gr-blocktool """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import sys
-import json
-import jsonschema
-
-from blocktool import RESULT_SCHEMA
-
-
-def is_valid():
- """ Validate json file """
-
- with open(sys.argv[1], 'r') as json_file:
- data = json.load(json_file)
- try:
- print("Validating...")
- jsonschema.validate(data, RESULT_SCHEMA)
- except jsonschema.ValidationError as exception:
- print("Record JSON file # {}: NOT OK".format(sys.argv[1]))
- raise Exception(exception)
- else:
- print("Record JSON file # {}: OK".format(sys.argv[1]))
-
-
-if __name__ == '__main__':
- if len(sys.argv) == 2:
- is_valid()
- else:
- raise Exception('Please provide exactly one JSON file')
diff --git a/gr-utils/python/modtool/CMakeLists.txt b/gr-utils/python/modtool/CMakeLists.txt
deleted file mode 100644
index 4b88bae791..0000000000
--- a/gr-utils/python/modtool/CMakeLists.txt
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-
-GR_PYTHON_INSTALL(FILES
- __init__.py
- DESTINATION ${GR_PYTHON_DIR}/gnuradio/modtool
-)
-
-set(GR_PKG_MODTOOL_DATA_DIR ${GR_PKG_DATA_DIR}/modtool/templates)
-
-
-########################################################################
-# Create and install the modtool conf file
-########################################################################
-file(TO_NATIVE_PATH ${CMAKE_INSTALL_PREFIX}/${GR_PKG_MODTOOL_DATA_DIR}/gr-newmod newmoddir)
-
-configure_file(
- ${CMAKE_CURRENT_SOURCE_DIR}/modtool.conf.in
- ${CMAKE_CURRENT_BINARY_DIR}/modtool.conf
-@ONLY)
-
-install(
- FILES ${CMAKE_CURRENT_BINARY_DIR}/modtool.conf
- DESTINATION ${GR_PREFSDIR}
-)
-
-########################################################################
-# Add subdirectories
-########################################################################
-add_subdirectory(core)
-add_subdirectory(cli)
-add_subdirectory(tools)
-add_subdirectory(templates)
-add_subdirectory(tests)
diff --git a/gr-utils/python/modtool/README.modtool b/gr-utils/python/modtool/README.modtool
deleted file mode 100644
index bffe49e9e2..0000000000
--- a/gr-utils/python/modtool/README.modtool
+++ /dev/null
@@ -1,29 +0,0 @@
-gr_modtool: Swiss Army Knife for editing GNU Radio modules and components.
-
-Adding a new subcommand for Command Line Interface
-==================================================
-
-* Add a new file called SUBCOMMAND in the cli directory
-* Create a function cli with the decorator @click.command or @click.group
-* Add the necessary options for the command or command group
-* Add that file to __init__.py and CMakeLists.txt in the cli directory (a minimal skeleton is sketched right after this README)
-
-
-Adding a new subcommand for Exposing as an API
-==============================================
-
-* Add a new file called SUBCOMMAND in the core directory
-* Have a look at the other subcommands; the new class must inherit from ModTool
-* Add that file to __init__.py and CMakeLists.txt in the core directory (a sketch follows the modtool/core/base.py listing below)
-
-
-The gr-newmod directory inside the templates directory
-======================================================
-
-This dir basically contains a copy of gr-howto-write-a-block from the gnuradio
-sources, with some differences:
-- All example blocks, apps, grc files (...) and references thereto in the
- CMake files are removed
-- In the top-level CMake file, the project is called 'gr-howto'.
-- Any time anything relevant is changed in gr-howto-write-a-block, it should
- be changed here, too.
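
To make the CLI steps above concrete, here is a minimal sketch of a hypothetical cli/fancy.py. The command name 'fancy' and the core class ModToolFancy are placeholders that do not exist in the tree; the imports and decorators follow the pattern used by the real cli modules further down in this diff.

import click

from ..core import ModToolFancy          # hypothetical core class (see the API section above)
from .base import common_params, block_name, run


@click.command('fancy', short_help='Do something fancy with a block.')
@common_params
@block_name
def cli(**kwargs):
    """ Do something fancy with a block of the out-of-tree module. """
    kwargs['cli'] = True
    self = ModToolFancy(**kwargs)
    click.secho("GNU Radio module name identified: " + self.info['modname'], fg='green')
    run(self)
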
diff --git a/gr-utils/python/modtool/__init__.py b/gr-utils/python/modtool/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/gr-utils/python/modtool/__init__.py
+++ /dev/null
diff --git a/gr-utils/python/modtool/cli/CMakeLists.txt b/gr-utils/python/modtool/cli/CMakeLists.txt
deleted file mode 100644
index f00adfb4d8..0000000000
--- a/gr-utils/python/modtool/cli/CMakeLists.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-
-include(GrPython)
-
-GR_PYTHON_INSTALL(FILES
- __init__.py
- add.py
- base.py
- disable.py
- info.py
- makeyaml.py
- newmod.py
- rm.py
- rename.py
- update.py
- DESTINATION ${GR_PYTHON_DIR}/gnuradio/modtool/cli
-)
diff --git a/gr-utils/python/modtool/cli/__init__.py b/gr-utils/python/modtool/cli/__init__.py
deleted file mode 100644
index 65f3603113..0000000000
--- a/gr-utils/python/modtool/cli/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-#
-# Copyright 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-from .base import cli, common_params, block_name, ModToolException
-from .base import setup_cli_logger, cli_input
diff --git a/gr-utils/python/modtool/cli/add.py b/gr-utils/python/modtool/cli/add.py
deleted file mode 100644
index e65bac1176..0000000000
--- a/gr-utils/python/modtool/cli/add.py
+++ /dev/null
@@ -1,140 +0,0 @@
-#
-# Copyright 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Module to add new blocks """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import re
-import getpass
-
-import click
-
-from ..core import ModToolAdd
-from ..tools import SequenceCompleter, ask_yes_no
-from .base import common_params, block_name, run, cli_input, ModToolException
-
-
-@click.command('add')
-@click.option('-t', '--block-type', type=click.Choice(ModToolAdd.block_types),
- help="One of {}.".format(', '.join(ModToolAdd.block_types)))
-@click.option('--license-file',
- help="File containing the license header for every source code file.")
-@click.option('--copyright',
- help="Name of the copyright holder (you or your company) MUST be a quoted string.")
-@click.option('--argument-list', default="",
- help="The argument list for the constructor and make functions.")
-@click.option('--add-python-qa', is_flag=True, default=None,
- help="If given, Python QA code is automatically added if possible.")
-@click.option('--add-cpp-qa', is_flag=True, default=None,
- help="If given, C++ QA code is automatically added if possible.")
-@click.option('--skip-cmakefiles', is_flag=True,
- help="If given, only source files are written, but CMakeLists.txt files are left unchanged.")
-@click.option('-l', '--lang', type=click.Choice(ModToolAdd.language_candidates),
- help="Programming Language")
-@common_params
-@block_name
-def cli(**kwargs):
- """Adds a block to the out-of-tree module."""
- kwargs['cli'] = True
- self = ModToolAdd(**kwargs)
- click.secho("GNU Radio module name identified: " + self.info['modname'], fg='green')
- get_blocktype(self)
- get_lang(self)
- click.secho("Language: {}".format({'cpp': 'C++', 'python': 'Python'}[self.info['lang']]), fg='green')
- if ((self.skip_subdirs['lib'] and self.info['lang'] == 'cpp')
- or (self.skip_subdirs['python'] and self.info['lang'] == 'python')):
- raise ModToolException('Missing or skipping relevant subdir.')
- get_blockname(self)
- click.secho("Block/code identifier: " + self.info['blockname'], fg='green')
- self.info['fullblockname'] = self.info['modname'] + '_' + self.info['blockname']
- if not self.license_file:
- get_copyrightholder(self)
- self.info['license'] = self.setup_choose_license()
- get_arglist(self)
- get_py_qa(self)
- get_cpp_qa(self)
- if self.info['version'] == 'autofoo' and not self.skip_cmakefiles:
- click.secho("Warning: Autotools modules are not supported. "+
- "Files will be created, but Makefiles will not be edited.",
- fg='yellow')
- self.skip_cmakefiles = True
- run(self)
-
-def get_blocktype(self):
- """ Get the blocktype of the block to be added """
- if self.info['blocktype'] is None:
- click.secho(str(self.block_types), fg='yellow')
- with SequenceCompleter(self.block_types):
- while self.info['blocktype'] not in self.block_types:
- self.info['blocktype'] = cli_input("Enter block type: ")
- if self.info['blocktype'] not in self.block_types:
- click.secho('Must be one of ' + str(self.block_types), fg='yellow')
-
-def get_lang(self):
- """ Get the Programming Language of the block to be added """
- if self.info['lang'] is None:
- with SequenceCompleter(self.language_candidates):
- while self.info['lang'] not in self.language_candidates:
- self.info['lang'] = cli_input("Language (python/cpp): ")
- if self.info['lang'] == 'c++':
- self.info['lang'] = 'cpp'
-
-def get_blockname(self):
- """ Get the blockname"""
- if not self.info['blockname'] or self.info['blockname'].isspace():
- while not self.info['blockname'] or self.info['blockname'].isspace():
- self.info['blockname'] = cli_input("Enter name of block/code (without module name prefix): ")
- if not re.match('^[a-zA-Z0-9_]+$', self.info['blockname']):
- raise ModToolException('Invalid block name.')
-
-def get_copyrightholder(self):
- """ Get the copyrightholder of the block to be added """
- if not self.info['copyrightholder'] or self.info['copyrightholder'].isspace():
- user = getpass.getuser()
- git_user = self.scm.get_gituser()
- if git_user:
- copyright_candidates = (user, git_user, 'GNU Radio')
- else:
- copyright_candidates = (user, 'GNU Radio')
- with SequenceCompleter(copyright_candidates):
- self.info['copyrightholder'] = cli_input("Please specify the copyright holder: ")
- if not self.info['copyrightholder'] or self.info['copyrightholder'].isspace():
- self.info['copyrightholder'] = "gr-{} author".format(self.info['modname'])
- elif self.info['is_component']:
- click.secho("For GNU Radio components the FSF is added as copyright holder",
- fg='cyan')
-
-def get_arglist(self):
- """ Get the argument list of the block to be added """
- if self.info['arglist'] is not None:
- self.info['arglist'] = click.prompt(click.style(
- 'Enter valid argument list, including default arguments: \n',
- fg='cyan'),
- prompt_suffix='',
- default='',
- show_default=False)
-
-def get_py_qa(self):
- """ Get a boolean value for addition of py_qa """
- if self.add_py_qa is None:
- if not (self.info['blocktype'] == 'noblock' or self.skip_subdirs['python']):
- self.add_py_qa = ask_yes_no(click.style('Add Python QA code?', fg='cyan'), True)
- else:
- self.add_py_qa = False
-
-def get_cpp_qa(self):
- """ Get a boolean value for addition of cpp_qa """
- if self.add_cc_qa is None:
- if self.info['lang'] == 'cpp':
- self.add_cc_qa = ask_yes_no(click.style('Add C++ QA code?', fg='cyan'),
- not self.add_py_qa)
- else:
- self.add_cc_qa = False
diff --git a/gr-utils/python/modtool/cli/base.py b/gr-utils/python/modtool/cli/base.py
deleted file mode 100644
index 2462a71d88..0000000000
--- a/gr-utils/python/modtool/cli/base.py
+++ /dev/null
@@ -1,158 +0,0 @@
-#
-# Copyright 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Base CLI module """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import os
-import sys
-import logging
-import functools
-from importlib import import_module
-from pkg_resources import iter_entry_points
-from logging import Formatter, StreamHandler
-
-import click
-from click import ClickException
-from click_plugins import with_plugins
-
-from gnuradio import gr
-
-
-class ModToolException(ClickException):
- """ Exception class for enhanced CLI interface """
- def show(self, file = None):
- """ displays the colored message """
- click.secho('ModToolException: {}'.format(self.format_message()), fg='red')
-
-
-class CommandCLI(click.Group):
- """
- A click.Group subclass that overrides command listing and
- lookup so that external plug-in commands can be used
- alongside the in-tree modtool commands.
- """
- cmd_folder = os.path.abspath(os.path.dirname(__file__))
-
- def list_commands(self, ctx):
- """
- Lists all the commands available in the modtool directory
- as well as the commands from external plug-ins.
- """
- cmds = []
- for filename in os.listdir(self.cmd_folder):
- if filename.endswith('.py') and not '_' in filename:
- cmds.append(filename[:-3])
- cmds.remove('base')
- cmds += self.commands
- return sorted(cmds)
-
- def get_command(self, ctx, cmd_name):
- """
- Returns a command object if it exists. An existing in-tree ModTool
- command takes priority over an external plug-in command with the same name.
- """
- try:
- mod = import_module('gnuradio.modtool.cli.' + cmd_name)
- except ImportError as err:
- logging.error(err)
- return self.commands.get(cmd_name)
- return mod.cli
-
-
-class ClickHandler(StreamHandler):
- """
- A logging.StreamHandler subclass that overrides emit() to
- colorize the stream output according to the log record's
- level.
- """
- def emit(self, record):
- """ Writes message to the stream """
- colormap = {
- 'DEBUG': ('white', 'black'),
- 'INFO': ('blue', None),
- 'WARNING': ('yellow', None),
- 'ERROR': ('red', None),
- 'CRITICAL': ('white', 'red'),
- }
- try:
- msg = self.format(record)
- colors = colormap.get(record.levelname, (None, None))
- fgcolor = colors[0]
- bgcolor = colors[1]
- click.secho(msg, fg=fgcolor, bg=bgcolor)
- self.flush()
- except Exception:
- self.handleError(record)
-
-
-def setup_cli_logger(logger):
- """ Sets up logger for CLI parsing """
- try:
- import colorama
- stream_handler = ClickHandler()
- logger.addHandler(stream_handler)
- except ImportError:
- stream_handler = logging.StreamHandler()
- logger.addHandler(stream_handler)
- finally:
- logger.setLevel(logging.INFO)
-
-
-def cli_input(msg):
- """ Returns enhanced input """
- return input(click.style(msg, fg='cyan'))
-
-
-def common_params(func):
- """ Common parameters for various modules"""
- @click.option('-d', '--directory', default='.',
- help="Base directory of the module. Defaults to the cwd.")
- @click.option('--skip-lib', is_flag=True,
- help="Don't do anything in the lib/ subdirectory.")
- @click.option('--skip-swig', is_flag=True,
- help="Don't do anything in the swig/ subdirectory.")
- @click.option('--skip-python', is_flag=True,
- help="Don't do anything in the python/ subdirectory.")
- @click.option('--skip-grc', is_flag=True,
- help="Don't do anything in the grc/ subdirectory.")
- @click.option('--scm-mode', type=click.Choice(['yes', 'no', 'auto']),
- default=gr.prefs().get_string('modtool', 'scm_mode', 'no'),
- help="Use source control management [ yes | no | auto ].")
- @click.option('-y', '--yes', is_flag=True,
- help="Answer all questions with 'yes'. " +
- "This can overwrite and delete your files, so be careful.")
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- """ Decorator that wraps common options """
- return func(*args, **kwargs)
- return wrapper
-
-
-block_name = click.argument('blockname', nargs=1, required=False, metavar="BLOCK_NAME")
-
-
-@with_plugins(iter_entry_points('gnuradio.modtool.cli.plugins'))
-@click.command(cls=CommandCLI,
- epilog='Manipulate the source code tree of a GNU Radio out-of-tree module. ' +
- 'Call a command without options to run it interactively.')
-def cli():
- """A tool for editing GNU Radio out-of-tree modules."""
- pass
-
-
-def run(module):
- """Call the run function of the core modules."""
- try:
- module.run()
- except ModToolException as err:
- click.echo(err, file=sys.stderr)
- exit(1)
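
The @with_plugins(iter_entry_points('gnuradio.modtool.cli.plugins')) decorator above means external packages can contribute extra subcommands through that setuptools entry-point group. A rough sketch of how such a plug-in could register itself; the package and module names here are invented:

# setup.py of a hypothetical external modtool plug-in
from setuptools import setup

setup(
    name='gr-modtool-fancy-plugin',
    version='0.1.0',
    py_modules=['fancy_plugin'],
    install_requires=['click', 'click-plugins'],
    entry_points={
        'gnuradio.modtool.cli.plugins': [
            # 'cli' must be a click command object defined in fancy_plugin.py
            'fancy = fancy_plugin:cli',
        ],
    },
)
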
diff --git a/gr-utils/python/modtool/cli/disable.py b/gr-utils/python/modtool/cli/disable.py
deleted file mode 100644
index 87f891e1c3..0000000000
--- a/gr-utils/python/modtool/cli/disable.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#
-# Copyright 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Disable blocks module """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import click
-
-from ..core import get_block_candidates, ModToolDisable
-from ..tools import SequenceCompleter
-from .base import common_params, block_name, run, cli_input
-
-
-@click.command('disable', short_help=ModToolDisable.description)
-@common_params
-@block_name
-def cli(**kwargs):
- """Disable a block (comments out CMake entries for files)"""
- kwargs['cli'] = True
- self = ModToolDisable(**kwargs)
- click.secho("GNU Radio module name identified: " + self.info['modname'], fg='green')
- get_pattern(self)
- run(self)
-
-def get_pattern(self):
- """ Get the regex pattern for block(s) to be disabled """
- if self.info['pattern'] is None:
- block_candidates = get_block_candidates()
- with SequenceCompleter(block_candidates):
- self.info['pattern'] = cli_input('Which blocks do you want to disable? (Regex): ')
- if not self.info['pattern'] or self.info['pattern'].isspace():
- self.info['pattern'] = '.'
diff --git a/gr-utils/python/modtool/cli/info.py b/gr-utils/python/modtool/cli/info.py
deleted file mode 100644
index df7a1f78b6..0000000000
--- a/gr-utils/python/modtool/cli/info.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-# Copyright 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Returns information about a module """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import click
-
-from ..core import ModToolInfo
-from .base import common_params, run
-
-
-@click.command('info')
-@click.option('--python-readable', is_flag=True,
- help="Return the output in a format that's easier to read for Python scripts.")
-@click.option('--suggested-dirs',
- help="Suggest typical include dirs if nothing better can be detected.")
-@common_params
-def cli(**kwargs):
- """ Return information about a given module """
- self = ModToolInfo(**kwargs)
- run(self)
diff --git a/gr-utils/python/modtool/cli/makeyaml.py b/gr-utils/python/modtool/cli/makeyaml.py
deleted file mode 100644
index 834cc05a21..0000000000
--- a/gr-utils/python/modtool/cli/makeyaml.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#
-# Copyright 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Automatically create YAML bindings for GRC from block code """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import os
-import click
-
-try:
- from gnuradio.blocktool import BlockHeaderParser
- from gnuradio.blocktool.core.base import BlockToolException
-except ImportError:
- have_blocktool = False
-else:
- have_blocktool = True
-
-from ..core import get_block_candidates, ModToolMakeYAML, yaml_generator
-from ..tools import SequenceCompleter
-from .base import common_params, block_name, run, cli_input
-
-
-@click.command('makeyaml', short_help=ModToolMakeYAML.description)
-@click.option('-b', '--blocktool', is_flag=True,
- help='Use blocktool support to print YAML output. A file path is mandatory if used.')
-@click.option('-o', '--output', is_flag=True,
- help='If given, a file with the desired output format will be generated.')
-@common_params
-@block_name
-def cli(**kwargs):
- """
- \b
- Make a YAML file for GRC block bindings
-
- Note: This does not work on Python blocks
- """
- kwargs['cli'] = True
- if kwargs['blocktool']:
- kwargs['modtool'] = True
- if kwargs['blockname'] is None:
- raise BlockToolException('Missing argument FILE PATH with blocktool flag')
- kwargs['file_path'] = os.path.abspath(kwargs['blockname'])
- if os.path.isfile(kwargs['file_path']):
- parse_yml = BlockHeaderParser(**kwargs)
- parse_yml.run_blocktool()
- parse_yml.cli = True
- parse_yml.yaml = True
- yaml_generator(parse_yml, **kwargs)
- else:
- raise BlockToolException('Invalid file path.')
- else:
- self = ModToolMakeYAML(**kwargs)
- click.secho("GNU Radio module name identified: " + self.info['modname'], fg='green')
- get_pattern(self)
- run(self)
-
-def get_pattern(self):
- """ Get the regex pattern for block(s) to be parsed """
- if self.info['pattern'] is None:
- block_candidates = get_block_candidates()
- with SequenceCompleter(block_candidates):
- self.info['pattern'] = cli_input('Which blocks do you want to parse? (Regex): ')
- if not self.info['pattern'] or self.info['pattern'].isspace():
- self.info['pattern'] = '.'
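
The blocktool path taken by this command can also be exercised without the CLI. A short sketch, mirroring what blocktool's own tests do; the header path is only an example:

import os

from gnuradio.blocktool import BlockHeaderParser

# Example header of an in-tree block; substitute the header you care about.
header = os.path.abspath('gr-analog/include/gnuradio/analog/agc2_cc.h')
parser = BlockHeaderParser(file_path=header)
header_info = parser.get_header_info()   # parsed block description as a dict
print(header_info['namespace'], header_info['io_signature'])
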
diff --git a/gr-utils/python/modtool/cli/newmod.py b/gr-utils/python/modtool/cli/newmod.py
deleted file mode 100644
index cdb4b56cfb..0000000000
--- a/gr-utils/python/modtool/cli/newmod.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#
-# Copyright 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Create a whole new out-of-tree module """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import re
-import os
-
-import click
-
-from gnuradio import gr
-from ..core import ModToolNewModule
-from .base import common_params, run, cli_input, ModToolException
-
-@click.command('newmod', short_help=ModToolNewModule.description)
-@click.option('--srcdir',
- help="Source directory for the module template.")
-@common_params
-@click.argument('module_name', metavar="MODULE-NAME", nargs=1, required=False)
-def cli(**kwargs):
- """
- \b
- Create a new out-of-tree module
-
- The argument MODULE-NAME is the name of the module to be added.
- """
- kwargs['cli'] = True
- self = ModToolNewModule(**kwargs)
- get_modname(self)
- self.dir = os.path.join(self.dir, 'gr-{}'.format(self.info['modname']))
- try:
- os.stat(self.dir)
- except OSError:
- pass # This is what should happen
- else:
- raise ModToolException('The given directory already exists.')
- if self.srcdir is None:
- self.srcdir = os.path.join(gr.prefix(),'share','gnuradio','modtool','templates','gr-newmod')
- if not os.path.isdir(self.srcdir):
- raise ModToolException('Could not find gr-newmod source dir.')
- run(self)
-
-def get_modname(self):
- """ Get the name of the new module to be added """
- if self.info['modname'] is None:
- while not self.info['modname'] or self.info['modname'].isspace():
- self.info['modname'] = cli_input('Name of the new module: ')
- if not re.match('[a-zA-Z0-9_]+$', self.info['modname']):
- raise ModToolException('Invalid module name.')
diff --git a/gr-utils/python/modtool/cli/rename.py b/gr-utils/python/modtool/cli/rename.py
deleted file mode 100644
index 86777fa3fb..0000000000
--- a/gr-utils/python/modtool/cli/rename.py
+++ /dev/null
@@ -1,71 +0,0 @@
-#
-# Copyright 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Module to rename blocks """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import re
-
-import click
-
-from ..core import get_block_candidates, ModToolRename
-from ..tools import SequenceCompleter
-from .base import common_params, block_name, run, cli_input, ModToolException
-
-
-@click.command('rename', short_help=ModToolRename.description)
-@common_params
-@block_name
-@click.argument('new-name', metavar="NEW-BLOCK-NAME", nargs=1, required=False)
-def cli(**kwargs):
- """
- \b
- Rename a block inside a module.
-
- The argument NEW-BLOCK-NAME is the new name of the block.
- """
- kwargs['cli'] = True
- self = ModToolRename(**kwargs)
- click.secho("GNU Radio module name identified: " + self.info['modname'], fg='green')
- # first make sure the old block name is provided
- get_oldname(self)
- click.secho("Block/code to rename identifier: " + self.info['oldname'], fg='green')
- self.info['fulloldname'] = self.info['modname'] + '_' + self.info['oldname']
- # now get the new block name
- get_newname(self)
- click.secho("Block/code identifier: " + self.info['newname'], fg='green')
- self.info['fullnewname'] = self.info['modname'] + '_' + self.info['newname']
- run(self)
-
-def get_oldname(self):
- """ Get the old block name to be replaced """
- block_candidates = get_block_candidates()
- if self.info['oldname'] is None:
- with SequenceCompleter(block_candidates):
- while not self.info['oldname'] or self.info['oldname'].isspace():
- self.info['oldname'] = cli_input("Enter name of block/code to rename "+
- "(without module name prefix): ")
- if self.info['oldname'] not in block_candidates:
- choices = [x for x in block_candidates if self.info['oldname'] in x]
- if len(choices) > 0:
- click.secho("Suggested alternatives: "+str(choices), fg='yellow')
- raise ModToolException("The block name to rename does not exist!")
- if not re.match('[a-zA-Z0-9_]+', self.info['oldname']):
- raise ModToolException('Invalid block name.')
-
-def get_newname(self):
- """ Get the new block name """
- if self.info['newname'] is None:
- while not self.info['newname'] or self.info['newname'].isspace():
- self.info['newname'] = cli_input("Enter name of block/code "+
- "(without module name prefix): ")
- if not re.match('[a-zA-Z0-9_]+', self.info['newname']):
- raise ModToolException('Invalid block name.')
diff --git a/gr-utils/python/modtool/cli/rm.py b/gr-utils/python/modtool/cli/rm.py
deleted file mode 100644
index f4447d750a..0000000000
--- a/gr-utils/python/modtool/cli/rm.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#
-# Copyright 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Remove blocks module """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import click
-
-from ..core import get_block_candidates, ModToolRemove
-from ..tools import SequenceCompleter
-from .base import common_params, block_name, run, cli_input
-
-
-@click.command('remove', short_help=ModToolRemove.description)
-@common_params
-@block_name
-def cli(**kwargs):
- """ Remove block (delete files and remove Makefile entries) """
- kwargs['cli'] = True
- self = ModToolRemove(**kwargs)
- click.secho("GNU Radio module name identified: " + self.info['modname'], fg='green')
- get_pattern(self)
- run(self)
-
-def get_pattern(self):
- """ Returns the regex pattern for block(s) to be removed """
- if self.info['pattern'] is None:
- block_candidates = get_block_candidates()
- with SequenceCompleter(block_candidates):
- self.info['pattern'] = cli_input('Which blocks do you want to delete? (Regex): ')
- if not self.info['pattern'] or self.info['pattern'].isspace():
- self.info['pattern'] = '.'
diff --git a/gr-utils/python/modtool/cli/update.py b/gr-utils/python/modtool/cli/update.py
deleted file mode 100644
index 285007e15a..0000000000
--- a/gr-utils/python/modtool/cli/update.py
+++ /dev/null
@@ -1,50 +0,0 @@
-#
-# Copyright 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Module to convert XML bindings to YAML bindings """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import click
-
-from ..core import get_xml_candidates, ModToolUpdate
-from ..tools import SequenceCompleter
-from .base import block_name, run, cli_input, ModToolException
-
-
-@click.command('update', short_help=ModToolUpdate.description)
-@click.option('--complete', is_flag=True, default=None,
- help="Convert all the XML bindings to YAML.")
-@click.option('-I', '--include-blacklisted', is_flag=True, default=None,
- help="Include XML files with blacklisted names in the conversion process")
-@block_name
-def cli(**kwargs):
- """ Update the XML bindings to YAML bindings """
- kwargs['cli'] = True
- self = ModToolUpdate(**kwargs)
- click.secho("GNU Radio module name identified: " + self.info['modname'], fg='green')
- get_blockname(self)
- run(self)
-
-def get_blockname(self):
- """ Returns the blockname for block to be updated """
- if self.info['complete']:
- return
- block_candidates = get_xml_candidates()
- if self.info['blockname'] is None:
- with SequenceCompleter(block_candidates):
- self.info['blockname'] = cli_input('Which block do you wish to update? : ')
- if not self.info['blockname'] or self.info['blockname'].isspace():
- raise ModToolException('Block name not specified!')
- if self.info['blockname'] not in block_candidates:
- choices = [x for x in block_candidates if self.info['blockname'] in x]
- if len(choices) > 0:
- click.secho("Suggested alternatives: "+str(choices), fg='yellow')
- raise ModToolException("The XML bindings do not exist!")
diff --git a/gr-utils/python/modtool/core/CMakeLists.txt b/gr-utils/python/modtool/core/CMakeLists.txt
deleted file mode 100644
index 764e79d09c..0000000000
--- a/gr-utils/python/modtool/core/CMakeLists.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright 2011, 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-
-include(GrPython)
-
-GR_PYTHON_INSTALL(FILES
- __init__.py
- add.py
- base.py
- disable.py
- info.py
- makeyaml.py
- newmod.py
- rm.py
- rename.py
- update.py
- DESTINATION ${GR_PYTHON_DIR}/gnuradio/modtool/core
-)
diff --git a/gr-utils/python/modtool/core/__init__.py b/gr-utils/python/modtool/core/__init__.py
deleted file mode 100644
index 5f89eee30f..0000000000
--- a/gr-utils/python/modtool/core/__init__.py
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-# Copyright 2013-2014, 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-from .base import ModTool, ModToolException, get_block_candidates
-from .add import ModToolAdd
-from .disable import ModToolDisable
-from .info import ModToolInfo
-from .makeyaml import ModToolMakeYAML, yaml_generator
-from .newmod import ModToolNewModule
-from .rm import ModToolRemove
-from .rename import ModToolRename
-from .update import ModToolUpdate, get_xml_candidates
diff --git a/gr-utils/python/modtool/core/add.py b/gr-utils/python/modtool/core/add.py
deleted file mode 100644
index adacb1ef90..0000000000
--- a/gr-utils/python/modtool/core/add.py
+++ /dev/null
@@ -1,307 +0,0 @@
-#
-# Copyright 2013-2014,2017-2019 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Module to add new blocks """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import os
-import re
-import logging
-
-from ..tools import render_template, append_re_line_sequence, CMakeFileEditor
-from ..templates import Templates
-from .base import ModTool, ModToolException
-
-logger = logging.getLogger(__name__)
-
-
-class ModToolAdd(ModTool):
- """ Add block to the out-of-tree module. """
- name = 'add'
- description = 'Add new block into a module.'
- block_types = ('sink', 'source', 'sync', 'decimator', 'interpolator',
- 'general', 'tagged_stream', 'hier', 'noblock')
- language_candidates = ('cpp', 'python', 'c++')
-
- def __init__(self, blockname=None, block_type=None, lang=None, copyright=None,
- license_file=None, argument_list="", add_python_qa=False,
- add_cpp_qa=False, skip_cmakefiles=False, **kwargs):
- ModTool.__init__(self, blockname, **kwargs)
- self.info['blocktype'] = block_type
- self.info['lang'] = lang
- self.license_file = license_file
- self.info['copyrightholder'] = copyright
- self.info['arglist'] = argument_list
- self.add_py_qa = add_python_qa
- self.add_cc_qa = add_cpp_qa
- self.skip_cmakefiles = skip_cmakefiles
-
- def validate(self):
- """ Validates the arguments """
- ModTool._validate(self)
- if self.info['blocktype'] is None:
- raise ModToolException('Blocktype not specified.')
- if self.info['blocktype'] not in self.block_types:
- raise ModToolException('Invalid blocktype')
- if self.info['lang'] is None:
- raise ModToolException('Programming language not specified.')
- if self.info['lang'] not in self.language_candidates:
- raise ModToolException('Invalid programming language.')
- if self.info['blocktype'] == 'tagged_stream' and self.info['lang'] == 'python':
- raise ModToolException('Tagged Stream Blocks for Python currently unsupported')
- if self.info['blockname'] is None:
- raise ModToolException('Blockname not specified.')
- if not re.match('^[a-zA-Z0-9_]+$', self.info['blockname']):
- raise ModToolException('Invalid block name.')
- if not isinstance(self.add_py_qa, bool):
- raise ModToolException('Expected a boolean value for add_python_qa.')
- if not isinstance(self.add_cc_qa, bool):
- raise ModToolException('Expected a boolean value for add_cpp_qa.')
- if not isinstance(self.skip_cmakefiles, bool):
- raise ModToolException('Expected a boolean value for skip_cmakefiles.')
-
- def assign(self):
- if self.info['lang'] == 'c++':
- self.info['lang'] = 'cpp'
- if ((self.skip_subdirs['lib'] and self.info['lang'] == 'cpp')
- or (self.skip_subdirs['python'] and self.info['lang'] == 'python')):
- raise ModToolException('Missing or skipping relevant subdir.')
- self.info['fullblockname'] = self.info['modname'] + '_' + self.info['blockname']
- if not self.license_file:
- if self.info['copyrightholder'] is None:
- self.info['copyrightholder'] = '<+YOU OR YOUR COMPANY+>'
- self.info['license'] = self.setup_choose_license()
- if (self.info['blocktype'] == 'noblock' or self.skip_subdirs['python']):
- self.add_py_qa = False
- if not self.info['lang'] == 'cpp':
- self.add_cc_qa = False
- if self.info['version'] == 'autofoo' and not self.skip_cmakefiles:
- self.skip_cmakefiles = True
-
- def setup_choose_license(self):
- """ Select a license by the following rules, in this order:
- 1) The contents of the file given by --license-file
- 2) The contents of the file LICENSE or LICENCE in the module's
- top directory
- 3) The default license. """
- if self.license_file is not None \
- and os.path.isfile(self.license_file):
- with open(self.license_file) as f:
- return f.read()
- elif os.path.isfile('LICENSE'):
- with open('LICENSE') as f:
- return f.read()
- elif os.path.isfile('LICENCE'):
- with open('LICENCE') as f:
- return f.read()
- elif self.info['is_component']:
- return Templates['grlicense']
- else:
- return Templates['defaultlicense'].format(**self.info)
-
- def _write_tpl(self, tpl, path, fname):
- """ Shorthand for writing a substituted template to a file"""
- path_to_file = os.path.join(path, fname)
- logger.info("Adding file '{}'...".format(path_to_file))
- with open(path_to_file, 'w') as f:
- f.write(render_template(tpl, **self.info))
- self.scm.add_files((path_to_file,))
-
- def run(self):
- """ Go, go, go. """
-
- # Some validation covered by the CLI - validate all parameters here
- self.validate()
- self.assign()
-
- has_swig = (
- self.info['lang'] == 'cpp'
- and not self.skip_subdirs['swig']
- )
- has_grc = False
- if self.info['lang'] == 'cpp':
- self._run_lib()
- has_grc = has_swig
- else: # Python
- self._run_python()
- if self.info['blocktype'] != 'noblock':
- has_grc = True
- if has_swig:
- self._run_swig()
- if self.add_py_qa:
- self._run_python_qa()
- if has_grc and not self.skip_subdirs['grc']:
- self._run_grc()
-
- def _run_cc_qa(self):
- " Add C++ QA files for 3.7 API if instructed from _run_lib"
- fname_qa_h = 'qa_{}.h'.format(self.info['blockname'])
- fname_qa_cc = 'qa_{}.cc'.format(self.info['blockname'])
- self._write_tpl('qa_cpp', 'lib', fname_qa_cc)
- self._write_tpl('qa_h', 'lib', fname_qa_h)
- if self.skip_cmakefiles:
- return
- try:
- append_re_line_sequence(self._file['cmlib'],
- r'list\(APPEND test_{}_sources.*\n'.format(self.info['modname']),
- 'qa_{}.cc'.format(self.info['blockname']))
- append_re_line_sequence(self._file['qalib'],
- '#include.*\n',
- '#include "{}"'.format(fname_qa_h))
- append_re_line_sequence(self._file['qalib'],
- '(addTest.*suite.*\n|new CppUnit.*TestSuite.*\n)',
- ' s->addTest(gr::{}::qa_{}::suite());'.format(self.info['modname'],
- self.info['blockname'])
- )
- self.scm.mark_files_updated((self._file['qalib'],))
- except IOError:
- logger.warning("Can't add C++ QA files.")
-
- def _run_cc_qa_boostutf(self):
- " Add C++ QA files for 3.8 API if instructed from _run_lib"
- fname_qa_cc = 'qa_{}.cc'.format(self.info['blockname'])
- self._write_tpl('qa_cpp_boostutf', 'lib', fname_qa_cc)
- if self.skip_cmakefiles:
- return
- try:
- append_re_line_sequence(self._file['cmlib'],
- r'list\(APPEND test_{}_sources.*\n'.format(self.info['modname']),
- 'qa_{}.cc'.format(self.info['blockname']))
- self.scm.mark_files_updated((self._file['cmlib'],))
- except IOError:
- logger.warning("Can't add C++ QA files.")
-
- def _run_lib(self):
- """ Do everything that needs doing in the subdir 'lib' and 'include'.
- - add .cc and .h files
- - include them into CMakeLists.txt
- - check if C++ QA code is req'd
- - if yes, create qa_*.{cc,h} and add them to CMakeLists.txt
- """
- fname_cc = None
- fname_h = None
- if self.info['version'] in ('37', '38'):
- fname_h = self.info['blockname'] + '.h'
- fname_cc = self.info['blockname'] + '.cc'
- if self.info['blocktype'] in ('source', 'sink', 'sync', 'decimator',
- 'interpolator', 'general', 'hier', 'tagged_stream'):
- fname_cc = self.info['blockname'] + '_impl.cc'
- self._write_tpl('block_impl_h', 'lib', self.info['blockname'] + '_impl.h')
- self._write_tpl('block_impl_cpp', 'lib', fname_cc)
- self._write_tpl('block_def_h', self.info['includedir'], fname_h)
- else: # Pre-3.7 or autotools
- fname_h = self.info['fullblockname'] + '.h'
- fname_cc = self.info['fullblockname'] + '.cc'
- self._write_tpl('block_h36', self.info['includedir'], fname_h)
- self._write_tpl('block_cpp36', 'lib', fname_cc)
- if self.add_cc_qa:
- if self.info['version'] == '38':
- self._run_cc_qa_boostutf()
- elif self.info['version'] == '37':
- self._run_cc_qa()
- elif self.info['version'] == '36':
- logger.warning("Warning: C++ QA files not supported for 3.6-style OOTs.")
- elif self.info['version'] == 'autofoo':
- logger.warning("Warning: C++ QA files not supported for autotools.")
- if not self.skip_cmakefiles:
- ed = CMakeFileEditor(self._file['cmlib'])
- cmake_list_var = '[a-z]*_?' + self.info['modname'] + '_sources'
- if not ed.append_value('list', fname_cc, to_ignore_start='APPEND ' + cmake_list_var):
- ed.append_value('add_library', fname_cc)
- ed.write()
- ed = CMakeFileEditor(self._file['cminclude'])
- ed.append_value('install', fname_h, to_ignore_end='DESTINATION[^()]+')
- ed.write()
- self.scm.mark_files_updated((self._file['cminclude'], self._file['cmlib']))
-
- def _run_swig(self):
- """ Do everything that needs doing in the subdir 'swig'.
- - Edit main *.i file
- """
- if self._get_mainswigfile() is None:
- logger.warning('Warning: No main swig file found.')
- return
- logger.info("Editing {}...".format(self._file['swig']))
- mod_block_sep = '/'
- if self.info['version'] == '36':
- mod_block_sep = '_'
- swig_block_magic_str = render_template('swig_block_magic', **self.info)
- with open(self._file['swig'], 'a') as f:
- f.write(swig_block_magic_str)
- include_str = '#include "{}{}{}.h"'.format(
- {True: 'gnuradio/' + self.info['modname'], False: self.info['modname']}[self.info['is_component']],
- mod_block_sep,
- self.info['blockname'])
- with open(self._file['swig'], 'r') as f:
- oldfile = f.read()
- if re.search('#include', oldfile):
- append_re_line_sequence(self._file['swig'], '^#include.*\n', include_str)
- else: # I.e., if the swig file is empty
- regexp = re.compile(r'^%\{\n', re.MULTILINE)
- oldfile = regexp.sub('%%{\n%s\n' % include_str, oldfile, count=1)
- with open(self._file['swig'], 'w') as f:
- f.write(oldfile)
- self.scm.mark_files_updated((self._file['swig'],))
-
- def _run_python_qa(self):
- """ Do everything that needs doing in the subdir 'python' to add
- QA code.
- - add .py files
- - include in CMakeLists.txt
- """
- fname_py_qa = 'qa_' + self.info['blockname'] + '.py'
- self._write_tpl('qa_python', self.info['pydir'], fname_py_qa)
- os.chmod(os.path.join(self.info['pydir'], fname_py_qa), 0o755)
- self.scm.mark_files_updated((os.path.join(self.info['pydir'], fname_py_qa),))
- if self.skip_cmakefiles or CMakeFileEditor(self._file['cmpython']).check_for_glob('qa_*.py'):
- return
- logger.info("Editing {}/CMakeLists.txt...".format(self.info['pydir']))
- with open(self._file['cmpython'], 'a') as f:
- f.write(
- 'GR_ADD_TEST(qa_%s ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/%s)\n' % \
- (self.info['blockname'], fname_py_qa))
- self.scm.mark_files_updated((self._file['cmpython'],))
-
- def _run_python(self):
- """ Do everything that needs doing in the subdir 'python' to add
- a Python block.
- - add .py file
- - include in CMakeLists.txt
- - include in __init__.py
- """
- fname_py = self.info['blockname'] + '.py'
- self._write_tpl('block_python', self.info['pydir'], fname_py)
- append_re_line_sequence(self._file['pyinit'],
- '(^from.*import.*\n|# import any pure.*\n)',
- 'from .{} import {}'.format(self.info['blockname'], self.info['blockname']))
- self.scm.mark_files_updated((self._file['pyinit'],))
- if self.skip_cmakefiles:
- return
- ed = CMakeFileEditor(self._file['cmpython'])
- ed.append_value('GR_PYTHON_INSTALL', fname_py, to_ignore_end='DESTINATION[^()]+')
- ed.write()
- self.scm.mark_files_updated((self._file['cmpython'],))
-
- def _run_grc(self):
- """ Do everything that needs doing in the subdir 'grc' to add
- a GRC bindings YAML file.
- - add .yml file
- - include in CMakeLists.txt
- """
- fname_grc = self.info['fullblockname'] + '.block.yml'
- self._write_tpl('grc_yml', 'grc', fname_grc)
- ed = CMakeFileEditor(self._file['cmgrc'], '\n ')
- if self.skip_cmakefiles or ed.check_for_glob('*.yml'):
- return
- logger.info("Editing grc/CMakeLists.txt...")
- ed.append_value('install', fname_grc, to_ignore_end='DESTINATION[^()]+')
- ed.write()
- self.scm.mark_files_updated((self._file['cmgrc'],))
diff --git a/gr-utils/python/modtool/core/base.py b/gr-utils/python/modtool/core/base.py
deleted file mode 100644
index 4073756ca7..0000000000
--- a/gr-utils/python/modtool/core/base.py
+++ /dev/null
@@ -1,202 +0,0 @@
-#
-# Copyright 2013, 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Base class for the modules """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import os
-import re
-import glob
-import logging
-import itertools
-
-from types import SimpleNamespace
-from gnuradio import gr
-from ..tools import get_modname, SCMRepoFactory
-
-logger = logging.getLogger('gnuradio.modtool')
-
-
-def get_block_candidates():
- """ Returns a list of all possible blocknames """
- block_candidates = []
- cpp_filters = ["*.cc", "*.cpp"]
- cpp_blocks = []
- for ftr in cpp_filters:
- cpp_blocks += [x for x in glob.glob1("lib", ftr) if not (x.startswith('qa_') or
- x.startswith('test_'))]
- python_blocks = [x for x in glob.glob1("python", "*.py") if not (x.startswith('qa_') or
- x.startswith('build') or x.startswith('__init__'))]
- for block in itertools.chain(cpp_blocks, python_blocks):
- block = os.path.splitext(block)[0]
- block = block.split('_impl')[0]
- block_candidates.append(block)
- return block_candidates
-
-
-class ModToolException(Exception):
- """ Standard exception for modtool classes. """
- pass
-
-
-class ModTool(object):
- """ Base class for all modtool command classes. """
- name = 'base'
- description = None
-
- def __init__(self, blockname=None, module_name=None, **kwargs):
- # List subdirs where stuff happens
- self._subdirs = ['lib', 'include', 'python', 'swig', 'grc']
- self.has_subdirs = {}
- self.skip_subdirs = {}
- self.info = {}
- self._file = {}
- for subdir in self._subdirs:
- self.has_subdirs[subdir] = False
- self.skip_subdirs[subdir] = False
- self.info['blockname'] = blockname
- self.info['modname'] = module_name
- self.cli = kwargs.get('cli', False)
- self.dir = kwargs.get('directory', '.')
- self.skip_subdirs['lib'] = kwargs.get('skip_lib', False)
- self.skip_subdirs['python'] = kwargs.get('skip_python', False)
- self.skip_subdirs['swig'] = kwargs.get('skip_swig', False)
- self.skip_subdirs['grc'] = kwargs.get('skip_grc', False)
- self._scm = kwargs.get('scm_mode',
- gr.prefs().get_string('modtool', 'scm_mode', 'no'))
- if not self.cli:
- logging.basicConfig(level=logging.ERROR, format='%(message)s')
- self.info['yes'] = True
- else:
- self.info['yes'] = kwargs.get('yes', False)
- from ..cli import setup_cli_logger
- setup_cli_logger(logger)
-
- if not type(self).__name__ in ['ModToolInfo', 'ModToolNewModule']:
- if self.cli:
- self._validate()
-
- def _validate(self):
- """ Validates the arguments """
- if not isinstance(self.skip_subdirs['lib'], bool):
- raise ModToolException('Expected a boolean value for skip_lib')
- if not isinstance(self.skip_subdirs['swig'], bool):
- raise ModToolException('Expected a boolean value for skip_swig')
- if not isinstance(self.skip_subdirs['python'], bool):
- raise ModToolException('Expected a boolean value for skip_python')
- if not isinstance(self.skip_subdirs['grc'], bool):
- raise ModToolException('Expected a boolean value for skip_grc')
- self._assign()
-
- def _assign(self):
- if not self._check_directory(self.dir):
- raise ModToolException('No GNU Radio module found in the given directory.')
- if self.info['modname'] is None:
- self.info['modname'] = get_modname()
- if self.info['modname'] is None:
- raise ModToolException('No GNU Radio module found in the given directory.')
- if self.info['version'] == '36' and (
- os.path.isdir(os.path.join('include', self.info['modname'])) or
- os.path.isdir(os.path.join('include', 'gnuradio', self.info['modname']))
- ):
- self.info['version'] = '37'
- if not os.path.isfile(os.path.join('cmake', 'Modules', 'FindCppUnit.cmake')):
- self.info['version'] = '38'
- if self.skip_subdirs['lib'] or not self.has_subdirs['lib']:
- self.skip_subdirs['lib'] = True
- if not self.has_subdirs['python']:
- self.skip_subdirs['python'] = True
- if self._get_mainswigfile() is None or not self.has_subdirs['swig']:
- self.skip_subdirs['swig'] = True
- if not self.has_subdirs['grc']:
- self.skip_subdirs['grc'] = True
-
- self._setup_files()
- self._setup_scm()
-
- def _setup_files(self):
- """ Initialise the self._file[] dictionary """
- if not self.skip_subdirs['swig']:
- self._file['swig'] = os.path.join('swig', self._get_mainswigfile())
- self.info['pydir'] = 'python'
- if os.path.isdir(os.path.join('python', self.info['modname'])):
- self.info['pydir'] = os.path.join('python', self.info['modname'])
- self._file['qalib'] = os.path.join('lib', 'qa_{}.cc'.format(self.info['modname']))
- self._file['pyinit'] = os.path.join(self.info['pydir'], '__init__.py')
- self._file['cmlib'] = os.path.join('lib', 'CMakeLists.txt')
- self._file['cmgrc'] = os.path.join('grc', 'CMakeLists.txt')
- self._file['cmpython'] = os.path.join(self.info['pydir'], 'CMakeLists.txt')
- if self.info['is_component']:
- self.info['includedir'] = os.path.join('include', 'gnuradio', self.info['modname'])
- elif self.info['version'] in ('37', '38'):
- self.info['includedir'] = os.path.join('include', self.info['modname'])
- else:
- self.info['includedir'] = 'include'
- self._file['cminclude'] = os.path.join(self.info['includedir'], 'CMakeLists.txt')
- self._file['cmswig'] = os.path.join('swig', 'CMakeLists.txt')
- self._file['cmfind'] = os.path.join('cmake', 'Modules', 'howtoConfig.cmake')
-
-
- def _setup_scm(self, mode='active'):
- """ Initialize source control management. """
- self.options = SimpleNamespace(scm_mode = self._scm)
- if mode == 'active':
- self.scm = SCMRepoFactory(self.options, '.').make_active_scm_manager()
- else:
- self.scm = SCMRepoFactory(self.options, '.').make_empty_scm_manager()
- if self.scm is None:
- logger.error("Error: Can't set up SCM.")
- exit(1)
-
- def _check_directory(self, directory):
- """ Guesses if dir is a valid GNU Radio module directory by looking for
- CMakeLists.txt and at least one of the subdirs lib/, python/ and swig/.
- Changes the directory, if valid. """
- has_makefile = False
- try:
- files = os.listdir(directory)
- os.chdir(directory)
- except OSError:
- logger.error("Can't read or chdir to directory {}.".format(directory))
- return False
- self.info['is_component'] = False
- for f in files:
- if os.path.isfile(f) and f == 'CMakeLists.txt':
- with open(f) as cmakefile:
- cmake_contents = cmakefile.read()
- if re.search(r'find_package\(Gnuradio', cmake_contents) is not None:
- self.info['version'] = '36' # Might be 37, check that later
- has_makefile = True
- elif re.search('GR_REGISTER_COMPONENT', cmake_contents) is not None:
- self.info['version'] = '36' # Might be 37, check that later
- self.info['is_component'] = True
- has_makefile = True
- # TODO search for autofoo
- elif os.path.isdir(f):
- if (f in list(self.has_subdirs.keys())):
- self.has_subdirs[f] = True
- else:
- self.skip_subdirs[f] = True
- return bool(has_makefile and any(self.has_subdirs.values()))
-
- def _get_mainswigfile(self):
- """ Find out which name the main SWIG file has. In particular, is it
- a MODNAME.i or a MODNAME_swig.i? Returns None if none is found. """
- modname = self.info['modname']
- swig_files = (modname + '.i',
- modname + '_swig.i')
- for fname in swig_files:
- if os.path.isfile(os.path.join(self.dir, 'swig', fname)):
- return fname
- return None
-
- def run(self):
- """ Override this. """
- raise NotImplementedError('Module implementation missing')
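
To tie this back to the README above: an API-level subcommand is a class in the core directory that inherits from ModTool, fills in self.info, and overrides run(). A minimal, hypothetical sketch that follows the structure of the real commands in this package (the class name and behaviour are made up):

from .base import ModTool, ModToolException


class ModToolPrintName(ModTool):
    """ Hypothetical command that only prints the detected module name. """
    name = 'printname'
    description = 'Print the name of the module.'

    def __init__(self, blockname=None, **kwargs):
        ModTool.__init__(self, blockname, **kwargs)

    def validate(self):
        """ Validates the arguments """
        ModTool._validate(self)

    def run(self):
        """ Go, go, go. """
        self.validate()
        if self.info['modname'] is None:
            raise ModToolException('No module name found.')
        print(self.info['modname'])
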
diff --git a/gr-utils/python/modtool/core/disable.py b/gr-utils/python/modtool/core/disable.py
deleted file mode 100644
index 1fb4628312..0000000000
--- a/gr-utils/python/modtool/core/disable.py
+++ /dev/null
@@ -1,161 +0,0 @@
-#
-# Copyright 2013, 2018, 2019 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Disable blocks module """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import os
-import re
-import sys
-import logging
-
-from ..tools import CMakeFileEditor
-from .base import ModTool, ModToolException
-
-logger = logging.getLogger(__name__)
-
-
-class ModToolDisable(ModTool):
- """ Disable block (comments out CMake entries for files) """
- name = 'disable'
- description = 'Disable selected block in module.'
-
- def __init__(self, blockname=None, **kwargs):
- ModTool.__init__(self, blockname, **kwargs)
- self.info['pattern'] = blockname
-
- def validate(self):
- """ Validates the arguments """
- ModTool._validate(self)
- if not self.info['pattern'] or self.info['pattern'].isspace():
- raise ModToolException("Invalid pattern!")
-
- def run(self):
- """ Go, go, go! """
- def _handle_py_qa(cmake, fname):
- """ Do stuff for py qa """
- cmake.comment_out_lines('GR_ADD_TEST.*'+fname)
- self.scm.mark_file_updated(cmake.filename)
- return True
- def _handle_py_mod(cmake, fname):
- """ Do stuff for py extra files """
- try:
- with open(self._file['pyinit']) as f:
- initfile = f.read()
- except IOError:
- logger.warning("Could not edit __init__.py, that might be a problem.")
- return False
- pymodname = os.path.splitext(fname)[0]
- initfile = re.sub(r'((from|import)\s+\b'+pymodname+r'\b)', r'#\1', initfile)
- with open(self._file['pyinit'], 'w') as f:
- f.write(initfile)
- self.scm.mark_file_updated(self._file['pyinit'])
- return False
- def _handle_cc_qa(cmake, fname):
- """ Do stuff for cc qa """
- if self.info['version'] == '37':
- cmake.comment_out_lines(r'\$\{CMAKE_CURRENT_SOURCE_DIR\}/'+fname)
- fname_base = os.path.splitext(fname)[0]
- ed = CMakeFileEditor(self._file['qalib']) # Abusing the CMakeFileEditor...
- ed.comment_out_lines(r'#include\s+"{}.h"'.format(fname_base), comment_str='//')
- ed.comment_out_lines(r'{}::suite\(\)'.format(fname_base), comment_str='//')
- ed.write()
- self.scm.mark_file_updated(self._file['qalib'])
- elif self.info['version'] == '38':
- fname_qa_cc = 'qa_{}.cc'.format(self.info['blockname'])
- cmake.comment_out_lines(fname_qa_cc)
- elif self.info['version'] == '36':
- cmake.comment_out_lines('add_executable.*'+fname)
- cmake.comment_out_lines('target_link_libraries.*'+os.path.splitext(fname)[0])
- cmake.comment_out_lines('GR_ADD_TEST.*'+os.path.splitext(fname)[0])
- self.scm.mark_file_updated(cmake.filename)
- return True
- def _handle_h_swig(cmake, fname):
- """ Comment out include files from the SWIG file,
- as well as the block magic """
- with open(self._file['swig']) as f:
- swigfile = f.read()
- (swigfile, nsubs) = re.subn(r'(.include\s+"({}/)?{}")'.format(
- self.info['modname'], fname),
- r'//\1', swigfile)
- if nsubs > 0:
- logger.info("Changing {}...".format(self._file['swig']))
- if nsubs > 1: # Need to find a single BLOCK_MAGIC
- blockname = os.path.splitext(fname[len(self.info['modname'])+1:])[0]
- if self.info['version'] in ('37', '38'):
- blockname = os.path.splitext(fname)[0]
- (swigfile, nsubs) = re.subn('(GR_SWIG_BLOCK_MAGIC2?.+{}.+;)'.format(blockname), r'//\1', swigfile)
- if nsubs > 1:
- logger.warning("Hm, changed more then expected while editing {}.".format(self._file['swig']))
- with open(self._file['swig'], 'w') as f:
- f.write(swigfile)
- self.scm.mark_file_updated(self._file['swig'])
- return False
- def _handle_i_swig(cmake, fname):
- """ Comment out include files from the SWIG file,
- as well as the block magic """
- with open(self._file['swig']) as f:
- swigfile = f.read()
- blockname = os.path.splitext(fname[len(self.info['modname'])+1:])[0]
- if self.info['version'] in ('37', '38'):
- blockname = os.path.splitext(fname)[0]
- swigfile = re.sub(r'(%include\s+"'+fname+'")', r'//\1', swigfile)
- logger.info("Changing {}...".format(self._file['swig']))
- swigfile = re.sub('(GR_SWIG_BLOCK_MAGIC2?.+'+blockname+'.+;)', r'//\1', swigfile)
- with open(self._file['swig'], 'w') as f:
- f.write(swigfile)
- self.scm.mark_file_updated(self._file['swig'])
- return False
-
- # This portion will be covered by the CLI
- if not self.cli:
- self.validate()
- else:
- from ..cli import cli_input
- # List of special rules: 0: subdir, 1: filename re match, 2: callback
- special_treatments = (
- ('python', r'qa.+py$', _handle_py_qa),
- ('python', r'^(?!qa).+py$', _handle_py_mod),
- ('lib', r'qa.+\.cc$', _handle_cc_qa),
- ('include/{}'.format(self.info['modname']), r'.+\.h$', _handle_h_swig),
- ('include', r'.+\.h$', _handle_h_swig),
- ('swig', r'.+\.i$', _handle_i_swig)
- )
- for subdir in self._subdirs:
- if self.skip_subdirs[subdir]:
- continue
- if self.info['version'] in ('37', '38') and subdir == 'include':
- subdir = 'include/{}'.format(self.info['modname'])
- try:
- cmake = CMakeFileEditor(os.path.join(subdir, 'CMakeLists.txt'))
- except IOError:
- continue
- logger.info("Traversing {}...".format(subdir))
- filenames = cmake.find_filenames_match(self.info['pattern'])
- yes = self.info['yes']
- for fname in filenames:
- file_disabled = False
- if not yes:
- ans = cli_input("Really disable {}? [Y/n/a/q]: ".format(fname)).lower().strip()
- if ans == 'a':
- yes = True
- if ans == 'q':
- sys.exit(0)
- if ans == 'n':
- continue
- for special_treatment in special_treatments:
- if special_treatment[0] == subdir and re.match(special_treatment[1], fname):
- file_disabled = special_treatment[2](cmake, fname)
- if not file_disabled:
- cmake.disable_file(fname)
- cmake.write()
- self.scm.mark_files_updated((os.path.join(subdir, 'CMakeLists.txt'),))
- logger.warning("Careful: 'gr_modtool disable' does not resolve dependencies.")
diff --git a/gr-utils/python/modtool/core/info.py b/gr-utils/python/modtool/core/info.py
deleted file mode 100644
index e242af0e6e..0000000000
--- a/gr-utils/python/modtool/core/info.py
+++ /dev/null
@@ -1,132 +0,0 @@
-#
-# Copyright 2013, 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Returns information about a module """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import os
-
-from ..tools import get_modname
-from .base import ModTool, ModToolException
-
-
-class ModToolInfo(ModTool):
- """ Return information about a given module """
- name = 'info'
- description = 'Return information about a given module.'
-
- def __init__(self, python_readable=False, suggested_dirs=None, **kwargs):
- ModTool.__init__(self, **kwargs)
-        # Don't call ModTool._validate(), it is too chatty!
- self._directory = self.dir
- self._python_readable = python_readable
- self._suggested_dirs = suggested_dirs
-
- def run(self):
- """ Go, go, go! """
- mod_info = dict()
- mod_info['base_dir'] = self._get_base_dir(self._directory)
- if mod_info['base_dir'] is None:
- raise ModToolException('{}' if self._python_readable else "No module found.")
- os.chdir(mod_info['base_dir'])
- mod_info['modname'] = get_modname()
- if mod_info['modname'] is None:
- raise ModToolException('{}' if self._python_readable else "No module found.")
- if self.info['version'] == '36' and (
- os.path.isdir(os.path.join('include', mod_info['modname'])) or
- os.path.isdir(os.path.join('include', 'gnuradio', mod_info['modname']))
- ):
- self.info['version'] = '37'
- if not os.path.isfile(os.path.join('cmake', 'Modules', 'FindCppUnit.cmake')):
- self.info['version'] = '38'
- mod_info['version'] = self.info['version']
- if 'is_component' in list(self.info.keys()) and self.info['is_component']:
- mod_info['is_component'] = True
- mod_info['incdirs'] = []
- mod_incl_dir = os.path.join(mod_info['base_dir'], 'include')
- if os.path.isdir(os.path.join(mod_incl_dir, mod_info['modname'])):
- mod_info['incdirs'].append(os.path.join(mod_incl_dir, mod_info['modname']))
- else:
- mod_info['incdirs'].append(mod_incl_dir)
- build_dir = self._get_build_dir(mod_info)
- if build_dir is not None:
- mod_info['build_dir'] = build_dir
- mod_info['incdirs'] += self._get_include_dirs(mod_info)
- if self._python_readable:
- print(str(mod_info))
- else:
- self._pretty_print(mod_info)
-
- def _get_base_dir(self, start_dir):
- """ Figure out the base dir (where the top-level cmake file is) """
- base_dir = os.path.abspath(start_dir)
- if self._check_directory(base_dir):
- return base_dir
- else:
- (up_dir, this_dir) = os.path.split(base_dir)
- if os.path.split(up_dir)[1] == 'include':
- up_dir = os.path.split(up_dir)[0]
- if self._check_directory(up_dir):
- return up_dir
- return None
-
- def _get_build_dir(self, mod_info):
- """ Figure out the build dir (i.e. where you run 'cmake'). This checks
- for a file called CMakeCache.txt, which is created when running cmake.
- If that hasn't happened, the build dir cannot be detected, unless it's
- called 'build', which is then assumed to be the build dir. """
- base_build_dir = mod_info['base_dir']
- if 'is_component' in list(mod_info.keys()):
- (base_build_dir, rest_dir) = os.path.split(base_build_dir)
- has_build_dir = os.path.isdir(os.path.join(base_build_dir, 'build'))
- if (has_build_dir and os.path.isfile(os.path.join(base_build_dir, 'CMakeCache.txt'))):
- return os.path.join(base_build_dir, 'build')
- else:
- for (dirpath, dirnames, filenames) in os.walk(base_build_dir):
- if 'CMakeCache.txt' in filenames:
- return dirpath
- if has_build_dir:
- return os.path.join(base_build_dir, 'build')
- return None
-
- def _get_include_dirs(self, mod_info):
- """ Figure out include dirs for the make process. """
- inc_dirs = []
- path_or_internal = {True: 'INTERNAL',
- False: 'PATH'}['is_component' in list(mod_info.keys())]
- try:
- cmakecache_fid = open(os.path.join(mod_info['build_dir'], 'CMakeCache.txt'))
- for line in cmakecache_fid:
- if line.find('GNURADIO_RUNTIME_INCLUDE_DIRS:{}'.format(path_or_internal)) != -1:
- inc_dirs += line.replace('GNURADIO_RUNTIME_INCLUDE_DIRS:{}='.format(path_or_internal), '').strip().split(';')
- except IOError:
- pass
- if not inc_dirs and self._suggested_dirs is not None:
- inc_dirs = [os.path.normpath(path) for path in self._suggested_dirs.split(':') if os.path.isdir(path)]
- return inc_dirs
-
-    def _pretty_print(self, mod_info):
- """ Output the module info in human-readable format """
- index_names = {'base_dir': 'Base directory',
- 'modname': 'Module name',
- 'is_component': 'Is GR component',
- 'build_dir': 'Build directory',
- 'incdirs': 'Include directories'}
- for key in list(mod_info.keys()):
- if key == 'version':
- print(" API version: {}".format({
- '36': 'pre-3.7',
- '37': 'post-3.7',
- '38': 'post-3.8',
- 'autofoo': 'Autotools (pre-3.5)'
- }[mod_info['version']]))
- else:
- print('%19s: %s' % (index_names[key], mod_info[key]))
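
_get_include_dirs() reduces to scanning CMakeCache.txt for a single variable. A simplified standalone sketch of that step (the 'build' directory in the example is a placeholder):

    import os

    def runtime_include_dirs(build_dir, kind='PATH'):
        """Pull GNURADIO_RUNTIME_INCLUDE_DIRS out of an existing CMakeCache.txt."""
        key = 'GNURADIO_RUNTIME_INCLUDE_DIRS:{}='.format(kind)
        dirs = []
        try:
            with open(os.path.join(build_dir, 'CMakeCache.txt')) as f:
                for line in f:
                    if key in line:
                        dirs += line.split('=', 1)[1].strip().split(';')
        except IOError:
            pass
        return dirs

    # Example: runtime_include_dirs('build') or runtime_include_dirs('build', 'INTERNAL')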
diff --git a/gr-utils/python/modtool/core/makeyaml.py b/gr-utils/python/modtool/core/makeyaml.py
deleted file mode 100644
index adf93cf4bd..0000000000
--- a/gr-utils/python/modtool/core/makeyaml.py
+++ /dev/null
@@ -1,353 +0,0 @@
-#
-# Copyright 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Automatically create YAML bindings for GRC from block code """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import os
-import re
-import glob
-import logging
-import yaml
-
-from collections import OrderedDict
-
-try:
- from yaml import CLoader as Loader, CDumper as Dumper
-except ImportError:
- from yaml import Loader, Dumper
-
-try:
- from gnuradio.blocktool.core import Constants
-except ImportError:
- have_blocktool = False
-else:
- have_blocktool = True
-
-from ..tools import ParserCCBlock, CMakeFileEditor, ask_yes_no, GRCYAMLGenerator
-from .base import ModTool, ModToolException
-
-
-logger = logging.getLogger(__name__)
-
-## setup dumper for dumping OrderedDict ##
-_MAPPING_TAG = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
-
-
-def dict_representer(dumper, data):
- """ Representer to represent special OrderedDict """
- return dumper.represent_dict(data.items())
-
-
-def dict_constructor(loader, node):
- """ Construct an OrderedDict for dumping """
- return OrderedDict(loader.construct_pairs(node))
-
-
-Dumper.add_representer(OrderedDict, dict_representer)
-Loader.add_constructor(_MAPPING_TAG, dict_constructor)
-
-
-class ModToolMakeYAML(ModTool):
- """ Make YAML file for GRC block bindings """
- name = 'makeyaml'
- description = 'Generate YAML files for GRC block bindings.'
-
- def __init__(self, blockname=None, **kwargs):
- ModTool.__init__(self, blockname, **kwargs)
- self.info['pattern'] = blockname
-
- def validate(self):
- """ Validates the arguments """
- ModTool._validate(self)
- if not self.info['pattern'] or self.info['pattern'].isspace():
- raise ModToolException("Incorrect blockname (Regex)!")
-
- def run(self):
- """ Go, go, go! """
- # This portion will be covered by the CLI
- if not self.cli:
- self.validate()
- logger.warning(
- "Warning: This is an experimental feature. Don't expect any magic.")
- # 1) Go through lib/
- if not self.skip_subdirs['lib']:
- if self.info['version'] in ('37', '38'):
- files = self._search_files('lib', '*_impl.cc')
- else:
- files = self._search_files('lib', '*.cc')
- for f in files:
- if os.path.basename(f)[0:2] == 'qa':
- continue
- (params, iosig, blockname) = self._parse_cc_h(f)
- self._make_grc_yaml_from_block_data(params, iosig, blockname)
- # 2) Go through python/
- # TODO
-
- def _search_files(self, path, path_glob):
- """ Search for files matching pattern in the given path. """
- files = sorted(glob.glob("{}/{}".format(path, path_glob)))
- files_filt = []
- logger.info("Searching for matching files in {}/:".format(path))
- for f in files:
- if re.search(self.info['pattern'], os.path.basename(f)) is not None:
- files_filt.append(f)
- if len(files_filt) == 0:
- logger.info("None found.")
- return files_filt
-
- def _make_grc_yaml_from_block_data(self, params, iosig, blockname):
- """ Take the return values from the parser and call the YAML
- generator. Also, check the makefile if the .yml file is in there.
- If necessary, add. """
- fname_yml = '{}_{}.block.yml'.format(self.info['modname'], blockname)
- path_to_yml = os.path.join('grc', fname_yml)
-        # Some adaptations for the GRC
- for inout in ('in', 'out'):
- if iosig[inout]['max_ports'] == '-1':
- iosig[inout]['max_ports'] = '$num_{}puts'.format(inout)
- params.append({'key': 'num_{}puts'.format(inout),
- 'type': 'int',
- 'name': 'Num {}puts'.format(inout),
- 'default': '2',
- 'in_constructor': False})
- file_exists = False
- if os.path.isfile(path_to_yml):
- if not self.info['yes']:
- if not ask_yes_no('Overwrite existing GRC file?', False):
- return
- else:
- file_exists = True
- logger.warning("Warning: Overwriting existing GRC file.")
- grc_generator = GRCYAMLGenerator(
- modname=self.info['modname'],
- blockname=blockname,
- params=params,
- iosig=iosig
- )
- grc_generator.save(path_to_yml)
- if file_exists:
- self.scm.mark_files_updated((path_to_yml,))
- else:
- self.scm.add_files((path_to_yml,))
- if not self.skip_subdirs['grc']:
- ed = CMakeFileEditor(self._file['cmgrc'])
- if re.search(fname_yml, ed.cfile) is None and not ed.check_for_glob('*.yml'):
- logger.info("Adding GRC bindings to grc/CMakeLists.txt...")
- ed.append_value('install', fname_yml,
- to_ignore_end='DESTINATION[^()]+')
- ed.write()
- self.scm.mark_files_updated(self._file['cmgrc'])
-
- def _parse_cc_h(self, fname_cc):
- """ Go through a .cc and .h-file defining a block and return info """
- def _type_translate(p_type, default_v=None):
- """ Translates a type from C++ to GRC """
- translate_dict = {'float': 'float',
- 'double': 'real',
- 'int': 'int',
- 'gr_complex': 'complex',
- 'char': 'byte',
- 'unsigned char': 'byte',
- 'std::string': 'string',
- 'std::vector<int>': 'int_vector',
- 'std::vector<float>': 'real_vector',
- 'std::vector<gr_complex>': 'complex_vector',
- }
- if p_type in ('int',) and default_v is not None and len(default_v) > 1 and default_v[:2].lower() == '0x':
- return 'hex'
- try:
- return translate_dict[p_type]
- except KeyError:
- return 'raw'
-
- def _get_blockdata(fname_cc):
- """ Return the block name and the header file name from the .cc file name """
- blockname = os.path.splitext(os.path.basename(
- fname_cc.replace('_impl.', '.')))[0]
- fname_h = (blockname + '.h').replace('_impl.', '.')
- contains_modulename = blockname.startswith(
- self.info['modname']+'_')
- blockname = blockname.replace(self.info['modname']+'_', '', 1)
- return (blockname, fname_h, contains_modulename)
- # Go, go, go
- logger.info("Making GRC bindings for {}...".format(fname_cc))
- (blockname, fname_h, contains_modulename) = _get_blockdata(fname_cc)
- try:
- parser = ParserCCBlock(fname_cc,
- os.path.join(
- self.info['includedir'], fname_h),
- blockname,
- self.info['version'],
- _type_translate
- )
- except IOError:
- raise ModToolException(
- "Can't open some of the files necessary to parse {}.".format(fname_cc))
-
- if contains_modulename:
- return (parser.read_params(), parser.read_io_signature(), self.info['modname']+'_'+blockname)
- else:
- return (parser.read_params(), parser.read_io_signature(), blockname)
-
-
-def yaml_generator(self, **kwargs):
- """
- Generate YAML file from the block header file using blocktool API
- """
- header = self.filename.split('.')[0]
- block = self.modname.split('-')[-1]
- label = header.split('_')
- del label[-1]
- yml_file = os.path.join('.', block+'_'+header+'.block.yml')
- _header = (('id', '{}_{}'.format(block, header)),
- ('label', ' '.join(label).upper()),
- ('category', '[{}]'.format(block.capitalize())),
- ('flags', '[python, cpp]')
- )
- params_list = [
- '${'+s['name']+'}' for s in self.parsed_data['properties'] if self.parsed_data['properties']]
- _templates = [('imports', 'from gnuradio import {}'.format(block)),
- ('make', '{}.{}({})'.format(block, header, ', '.join(params_list)))
- ]
-
- if self.parsed_data['methods']:
- list_callbacks = []
- for param in self.parsed_data['methods']:
- arguments = []
- for args in param['arguments_type']:
- arguments.append(args['name'])
- arg_list = ['${'+s+'}' for s in arguments if arguments]
- list_callbacks.append(
- param['name']+'({})'.format(', '.join(arg_list)))
- callback_key = ('callbacks')
- callbacks = (callback_key, tuple(list_callbacks))
- _templates.append(callbacks)
- _templates = tuple(_templates)
-
- data = OrderedDict()
- for tag, value in _header:
- data[tag] = value
-
- templates = OrderedDict()
- for tag, value in _templates:
- templates[tag] = value
- data['templates'] = templates
-
- parameters = []
- for param in self.parsed_data['properties']:
- parameter = OrderedDict()
- parameter['id'] = param['name']
- parameter['label'] = param['name'].capitalize()
- parameter['dtype'] = param['dtype']
- parameter['read_only'] = param['read_only']
- parameters.append(parameter)
- if parameters:
- data['parameters'] = parameters
-
- input_signature = []
- max_input_port = self.parsed_data['io_signature']['input']['max_streams']
- i_sig = self.parsed_data['io_signature']['input']['signature']
- for port in range(0, int(max_input_port)):
- input_sig = OrderedDict()
- if i_sig is Constants.MAKE:
- input_sig['domain'] = 'stream'
- input_sig['dtype'] = self.parsed_data['io_signature']['input']['sizeof_stream_item']
- elif i_sig is Constants.MAKE2:
- input_sig['domain'] = 'stream'
- input_sig['dtype'] = self.parsed_data['io_signature']['input']['sizeof_stream_item' +
- str(port+1)]
- elif i_sig is Constants.MAKE3:
- input_sig['domain'] = 'stream'
- input_sig['dtype'] = self.parsed_data['io_signature']['input']['sizeof_stream_item' +
- str(port+1)]
- elif i_sig is Constants.MAKEV:
- input_sig['domain'] = 'stream'
- input_sig['dtype'] = self.parsed_data['io_signature']['input']['sizeof_stream_items']
- input_signature.append(input_sig)
-
- if self.parsed_data['message_port']['input']:
- for _input in self.parsed_data['message_port']['input']:
- m_input_sig = OrderedDict()
- m_input_sig['domain'] = 'message'
- m_input_sig['id'] = _input
- input_signature.append(m_input_sig)
- if input_signature:
- data['inputs'] = input_signature
-
- output_signature = []
- max_output_port = self.parsed_data['io_signature']['output']['max_streams']
- o_sig = self.parsed_data['io_signature']['output']['signature']
- for port in range(0, int(max_output_port)):
- output_sig = OrderedDict()
- if o_sig is Constants.MAKE:
- output_sig['domain'] = 'stream'
- output_sig['dtype'] = self.parsed_data['io_signature']['output']['sizeof_stream_item']
- elif o_sig is Constants.MAKE2:
- output_sig['domain'] = 'stream'
- output_sig['dtype'] = self.parsed_data['io_signature']['output']['sizeof_stream_item' +
- str(port+1)]
- elif o_sig is Constants.MAKE3:
- output_sig['domain'] = 'stream'
- output_sig['dtype'] = self.parsed_data['io_signature']['output']['sizeof_stream_item' +
- str(port+1)]
- elif o_sig is Constants.MAKEV:
- output_sig['domain'] = 'stream'
- output_sig['dtype'] = self.parsed_data['io_signature']['output']['sizeof_stream_items']
- output_signature.append(output_sig)
-
- if self.parsed_data['message_port']['output']:
- for _output in self.parsed_data['message_port']['output']:
- m_output_sig = OrderedDict()
- m_output_sig['domain'] = 'message'
- m_output_sig['id'] = _output
- output_signature.append(m_output_sig)
- if output_signature:
- data['outputs'] = output_signature
-
- _cpp_templates = [('includes', '#include <gnuradio/{}/{}>'.format(block, self.filename)),
- ('declarations', '{}::{}::sptr ${{id}}'.format(block, header)),
- ('make', 'this->${{id}} = {}::{}::make({})'.format(
- block, header, ', '.join(params_list)))
- ]
-
- if self.parsed_data['methods']:
- list_callbacks = []
- for param in self.parsed_data['methods']:
- arguments = []
- for args in param['arguments_type']:
- arguments.append(args['name'])
- arg_list = ['${'+s+'}' for s in arguments if arguments]
- list_callbacks.append(
- param['name']+'({})'.format(', '.join(arg_list)))
- callback_key = ('callbacks')
- callbacks = (callback_key, tuple(list_callbacks))
- _cpp_templates.append(callbacks)
-
- link = ('link', 'gnuradio-{}'.format(block))
- _cpp_templates.append(link)
- _cpp_templates = tuple(_cpp_templates)
-
- cpp_templates = OrderedDict()
- for tag, value in _cpp_templates:
- cpp_templates[tag] = value
- data['cpp_templates'] = cpp_templates
-
- if self.parsed_data['docstring'] is not None:
- data['documentation'] = self.parsed_data['docstring']
- data['file_format'] = 1
-
- if kwargs['output']:
- with open(yml_file, 'w') as yml:
- yaml.dump(data, yml, Dumper=Dumper, default_flow_style=False)
- else:
- print(yaml.dump(data, Dumper=Dumper, allow_unicode=True,
- default_flow_style=False, indent=4))
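
The OrderedDict dumper registration near the top of this file is what keeps YAML keys in insertion order. In isolation the pattern looks like this (PyYAML assumed available; the block data is invented):

    from collections import OrderedDict
    import yaml

    try:
        from yaml import CDumper as Dumper
    except ImportError:
        from yaml import Dumper

    Dumper.add_representer(
        OrderedDict, lambda dumper, data: dumper.represent_dict(data.items()))

    block = OrderedDict([('id', 'howto_square_ff'),
                         ('label', 'Square ff'),
                         ('file_format', 1)])
    print(yaml.dump(block, Dumper=Dumper, default_flow_style=False))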
diff --git a/gr-utils/python/modtool/core/newmod.py b/gr-utils/python/modtool/core/newmod.py
deleted file mode 100644
index 1d22626e10..0000000000
--- a/gr-utils/python/modtool/core/newmod.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#
-# Copyright 2013, 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Create a whole new out-of-tree module """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import shutil
-import os
-import re
-import logging
-
-from gnuradio import gr
-from ..tools import SCMRepoFactory
-from .base import ModTool, ModToolException
-
-logger = logging.getLogger(__name__)
-
-class ModToolNewModule(ModTool):
- """ Create a new out-of-tree module """
- name = 'newmod'
- description = 'Create new empty module, use add to add blocks.'
- def __init__(self, module_name=None, srcdir=None, **kwargs):
- ModTool.__init__(self, None, module_name, **kwargs)
- # Don't call ModTool._validate(), that assumes an existing module.
- self.srcdir = srcdir
- self.directory = self.dir
-
- def assign(self):
- self.dir = os.path.join(self.directory, 'gr-{}'.format(self.info['modname']))
- if self.srcdir is None:
- self.srcdir = os.path.join(gr.prefix(),'share','gnuradio','modtool','templates','gr-newmod')
-
- def validate(self):
- """ Validates the arguments """
- if not self.info['modname']:
- raise ModToolException('Module name not specified.')
- if not re.match('[a-zA-Z0-9_]+$', self.info['modname']):
- raise ModToolException('Invalid module name.')
- try:
- os.stat(self.dir)
- except OSError:
- pass # This is what should happen
- else:
- raise ModToolException('The given directory exists.')
- if not os.path.isdir(self.srcdir):
- raise ModToolException('Could not find gr-newmod source dir \'' + self.srcdir + '\'')
-
- def run(self):
- """
- * Copy the example dir recursively
- * Open all files, rename howto and HOWTO to the module name
- * Rename files and directories that contain the word howto
- """
- # This portion will be covered by the CLI
- if not self.cli:
- self.assign()
- self.validate()
- self._setup_scm(mode='new')
- logger.info("Creating out-of-tree module in {}...".format(self.dir))
- try:
- shutil.copytree(self.srcdir, self.dir)
- os.chdir(self.dir)
- except OSError:
- raise ModToolException('Could not create directory {}.'.format(self.dir))
- for root, dirs, files in os.walk('.'):
- for filename in files:
- f = os.path.join(root, filename)
- with open(f, 'r') as filetext:
- s = filetext.read()
- s = s.replace('howto', self.info['modname'])
- s = s.replace('HOWTO', self.info['modname'].upper())
- with open(f, 'w') as filetext:
- filetext.write(s)
- if filename.find('howto') != -1:
- os.rename(f, os.path.join(root, filename.replace('howto', self.info['modname'])))
- if os.path.basename(root) == 'howto':
- os.rename(root, os.path.join(os.path.dirname(root), self.info['modname']))
- logger.info("Done.")
- if self.scm.init_repo(path_to_repo="."):
- logger.info("Created repository... you might want to commit before continuing.")
- logger.info("Use 'gr_modtool add' to add a new block to this currently empty module.")
diff --git a/gr-utils/python/modtool/core/rename.py b/gr-utils/python/modtool/core/rename.py
deleted file mode 100644
index d202d2375a..0000000000
--- a/gr-utils/python/modtool/core/rename.py
+++ /dev/null
@@ -1,175 +0,0 @@
-#
-# Copyright 2014, 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Module to rename blocks """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import os
-import re
-import logging
-
-from .base import get_block_candidates, ModTool, ModToolException
-
-logger = logging.getLogger(__name__)
-
-
-class ModToolRename(ModTool):
- """ Rename a block in the out-of-tree module. """
- name = 'rename'
- description = 'Rename a block inside a module.'
-
- def __init__(self, blockname=None, new_name=None, **kwargs):
- ModTool.__init__(self, blockname, **kwargs)
- self.info['oldname'] = blockname
- self.info['newname'] = new_name
-
- def validate(self):
- """ Validates the arguments """
- ModTool._validate(self)
- if not self.info['oldname']:
- raise ModToolException('Old block name (blockname) not specified.')
- if not re.match('[a-zA-Z0-9_]+', self.info['oldname']):
- raise ModToolException('Invalid block name.')
- block_candidates = get_block_candidates()
- if self.info['oldname'] not in block_candidates:
- choices = [x for x in block_candidates if self.info['oldname'] in x]
- if len(choices)>0:
- print("Suggested alternatives: "+str(choices))
- raise ModToolException("Blockname for renaming does not exists!")
- if not self.info['newname']:
- raise ModToolException('New blockname (new_name) not specified.')
- if not re.match('[a-zA-Z0-9_]+', self.info['newname']):
- raise ModToolException('Invalid new block name.')
-
- def assign(self):
- self.info['fullnewname'] = self.info['modname'] + '_' + self.info['newname']
-
- def run(self):
- """ Go, go, go. """
- # This portion will be covered by the CLI
- if not self.cli:
- self.validate()
- self.assign()
- module = self.info['modname']
- oldname = self.info['oldname']
- newname = self.info['newname']
- logger.info("In module '{}' rename block '{}' to '{}'".format(module, oldname, newname))
- self._run_swig_rename(self._file['swig'], oldname, newname)
- self._run_grc_rename(self.info['modname'], oldname, newname)
- self._run_python_qa(self.info['modname'], oldname, newname)
- self._run_python(self.info['modname'], oldname, newname)
- self._run_lib(self.info['modname'], oldname, newname)
- self._run_include(self.info['modname'], oldname, newname)
- return
-
- def _run_swig_rename(self, swigfilename, old, new):
- """ Rename SWIG includes and block_magic """
- nsubs = self._run_file_replace(swigfilename, old, new)
- if nsubs < 1:
- logger.info("Couldn't find '{}' in file '{}'.".format(old, swigfilename))
- if nsubs == 2:
- logger.info("Changing 'noblock' type file")
- if nsubs > 3:
- logger.warning("Hm, changed more then expected while editing {}.".format(swigfilename))
- return False
-
- def _run_lib(self, module, old, new):
- ccfile = './lib/' + old + '_impl.cc'
- if not os.path.isfile(ccfile): # in case it is a 'noblock'
- ccfile = './lib/' + old + '.cc'
- hfile = './lib/' + old + '_impl.h'
- self._run_file_replace(ccfile, old, new)
- self._run_file_replace(hfile, old, new)
- self._run_file_replace(hfile, old.upper(), new.upper()) # take care of include guards
- self._run_cmakelists('./lib/', old, new)
- self._run_file_rename('./lib/', old, new)
- self._run_cpp_qa(module, old, new)
-
- def _run_cpp_qa(self, module, old, new):
- path = './lib/'
- filename = 'qa_' + module + '.cc'
- nsubs = self._run_file_replace(path + filename, old, new)
- if nsubs > 0:
- logger.info("C++ QA code detected, renaming...")
- filename = 'qa_' + old + '.cc'
- self._run_file_replace(path + filename, old, new)
- filename = 'qa_' + old + '.h'
- self._run_file_replace(path + filename, old, new)
- self._run_file_replace(path + filename, old.upper(), new.upper())
- self._run_file_rename(path, 'qa_' + old, 'qa_' + new)
- else:
- logger.info("No C++ QA code detected, skipping...")
-
- def _run_include(self, module, old, new):
- path = './include/' + module + '/'
- filename = path + old + '.h'
- self._run_file_replace(filename, old, new)
- self._run_file_replace(filename, old.upper(), new.upper()) # take care of include guards
- self._run_cmakelists(path, old, new)
- self._run_file_rename(path, old, new)
-
- def _run_python(self, module, old, new):
- path = './python/'
- filename = '__init__.py'
- nsubs = self._run_file_replace(path + filename, old, new)
- if nsubs > 0:
- logger.info("Python block detected, renaming...")
- filename = old + '.py'
- self._run_file_replace(path + filename, old, new)
- self._run_cmakelists(path, old, new)
- self._run_file_rename(path, old, new)
- else:
- logger.info("Not a Python block, nothing to do here...")
-
- def _run_python_qa(self, module, old, new):
- new = 'qa_' + new
- old = 'qa_' + old
- filename = './python/' + old + '.py'
- self._run_file_replace(filename, old, new)
- self._run_cmakelists('./python/', old, new)
- self._run_file_rename('./python/', old, new)
-
- def _run_grc_rename(self, module, old, new):
- grcfile = './grc/' + module + '_' + old + '.yml'
- self._run_file_replace(grcfile, old, new)
- self._run_cmakelists('./grc/', old, new)
- self._run_file_rename('./grc/', module + '_' + old, module + '_' + new)
-
- def _run_cmakelists(self, path, first, second):
- filename = path + 'CMakeLists.txt'
- nsubs = self._run_file_replace(filename, first, second)
- if nsubs < 1:
- logger.info("'{}' wasn't in '{}'.".format(first, filename))
-
- def _run_file_rename(self, path, old, new):
- files = os.listdir(path)
- for file in files:
- if file.find(old) > -1 and file.find(old) < 3:
- nl = file.replace(old, new)
- src = path + file
- dst = path + nl
- logger.info("Renaming file '{}' to '{}'.".format(src, dst))
- os.rename(src, dst)
-
- def _run_file_replace(self, filename, old, new):
- if not os.path.isfile(filename):
- return False
- else:
- logger.info("In '{}' renaming occurrences of '{}' to '{}'".format(filename, old, new))
-
- with open(filename) as f:
- cfile = f.read()
- (cfile, nsubs) = re.subn(old, new, cfile)
-
- with open(filename, 'w') as f:
- f.write(cfile)
- self.scm.mark_file_updated(filename)
- return nsubs
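
All of the _run_* helpers above funnel into the same replace-and-count pattern; on its own it is simply (note that 'old' is treated as a regular expression, exactly as in _run_file_replace()):

    import re

    def replace_in_file(filename, old, new):
        """Substitute 'old' (a regex) with 'new' and return the substitution count."""
        with open(filename) as f:
            text = f.read()
        text, nsubs = re.subn(old, new, text)
        with open(filename, 'w') as f:
            f.write(text)
        return nsubs

    # Example: replace_in_file('lib/old_block_impl.cc', 'old_block', 'new_block')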
diff --git a/gr-utils/python/modtool/core/rm.py b/gr-utils/python/modtool/core/rm.py
deleted file mode 100644
index 72c9da4666..0000000000
--- a/gr-utils/python/modtool/core/rm.py
+++ /dev/null
@@ -1,167 +0,0 @@
-#
-# Copyright 2013, 2018, 2019 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Remove blocks module """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import os
-import re
-import sys
-import glob
-import logging
-
-from ..tools import remove_pattern_from_file, CMakeFileEditor
-from .base import ModTool, ModToolException
-
-logger = logging.getLogger(__name__)
-
-
-class ModToolRemove(ModTool):
- """ Remove block (delete files and remove Makefile entries) """
- name = 'remove'
- description = 'Remove a block from a module.'
-
- def __init__(self, blockname=None, **kwargs):
- ModTool.__init__(self, blockname, **kwargs)
- self.info['pattern'] = blockname
-
- def validate(self):
- """ Validates the arguments """
- ModTool._validate(self)
- if not self.info['pattern'] or self.info['pattern'].isspace():
- raise ModToolException("Incorrect blockname (Regex)!")
-
- def run(self):
- """ Go, go, go! """
- # This portion will be covered by the CLI
- if not self.cli:
- self.validate()
- else:
- from ..cli import cli_input
- def _remove_cc_test_case(filename=None, ed=None):
- """ Special function that removes the occurrences of a qa*.cc file
- from the CMakeLists.txt. """
- if filename[:2] != 'qa':
- return
- if self.info['version'] == '37':
- (base, ext) = os.path.splitext(filename)
- if ext == '.h':
- remove_pattern_from_file(self._file['qalib'],
- r'^#include "{}"\s*$'.format(filename))
- remove_pattern_from_file(self._file['qalib'],
- r'^\s*s->addTest\(gr::{}::{}::suite\(\)\);\s*$'.format(
- self.info['modname'], base)
- )
- self.scm.mark_file_updated(self._file['qalib'])
- elif ext == '.cc':
- ed.remove_value('list',
- r'\$\{CMAKE_CURRENT_SOURCE_DIR\}/%s' % filename,
- to_ignore_start='APPEND test_{}_sources'.format(self.info['modname']))
- self.scm.mark_file_updated(ed.filename)
- elif self.info['version'] == '38':
- (base, ext) = os.path.splitext(filename)
- if ext == '.cc':
- ed.remove_value(
- 'list', filename,
- to_ignore_start='APPEND test_{}_sources'.format(self.info['modname']))
- self.scm.mark_file_updated(ed.filename)
- else:
- filebase = os.path.splitext(filename)[0]
- ed.delete_entry('add_executable', filebase)
- ed.delete_entry('target_link_libraries', filebase)
- ed.delete_entry('GR_ADD_TEST', filebase)
- ed.remove_double_newlines()
- self.scm.mark_file_updated(ed.filename)
-
- def _remove_py_test_case(filename=None, ed=None):
- """ Special function that removes the occurrences of a qa*.{cc,h} file
- from the CMakeLists.txt and the qa_$modname.cc. """
- if filename[:2] != 'qa':
- return
- filebase = os.path.splitext(filename)[0]
- ed.delete_entry('GR_ADD_TEST', filebase)
- ed.remove_double_newlines()
-
- def _make_swig_regex(filename):
- filebase = os.path.splitext(filename)[0]
- pyblockname = filebase.replace(self.info['modname'] + '_', '')
- regexp = r'(^\s*GR_SWIG_BLOCK_MAGIC2?\({},\s*{}\);|^\s*.include\s*"({}/)?{}"\s*)'.format \
- (self.info['modname'], pyblockname, self.info['modname'], filename)
- return regexp
- # Go, go, go!
- if not self.skip_subdirs['lib']:
- self._run_subdir('lib', ('*.cc', '*.h'), ('add_library', 'list'),
- cmakeedit_func=_remove_cc_test_case)
- if not self.skip_subdirs['include']:
- incl_files_deleted = self._run_subdir(self.info['includedir'], ('*.h',), ('install',))
- if not self.skip_subdirs['swig']:
- swig_files_deleted = self._run_subdir('swig', ('*.i',), ('install',))
- for f in incl_files_deleted + swig_files_deleted:
- # TODO do this on all *.i files
- remove_pattern_from_file(self._file['swig'], _make_swig_regex(f))
- self.scm.mark_file_updated(self._file['swig'])
- if not self.skip_subdirs['python']:
- py_files_deleted = self._run_subdir('python', ('*.py',), ('GR_PYTHON_INSTALL',),
- cmakeedit_func=_remove_py_test_case)
- for f in py_files_deleted:
- remove_pattern_from_file(self._file['pyinit'], r'.*import\s+{}.*'.format(f[:-3]))
- remove_pattern_from_file(self._file['pyinit'], r'.*from\s+{}\s+import.*\n'.format(f[:-3]))
- if not self.skip_subdirs['grc']:
- self._run_subdir('grc', ('*.yml',), ('install',))
-
- def _run_subdir(self, path, globs, makefile_vars, cmakeedit_func=None):
- """ Delete all files that match a certain pattern in path.
- path - The directory in which this will take place
- globs - A tuple of standard UNIX globs of files to delete (e.g. *.yml)
- makefile_vars - A tuple with a list of CMakeLists.txt-variables which
- may contain references to the globbed files
- cmakeedit_func - If the CMakeLists.txt needs special editing, use this
- """
- if self.cli:
- from ..cli import cli_input
- # 1. Create a filtered list
- files = []
- for g in globs:
- files = files + sorted(glob.glob("{}/{}".format(path, g)))
- files_filt = []
- logger.info("Searching for matching files in {}/:".format(path))
- for f in files:
- if re.search(self.info['pattern'], os.path.basename(f)) is not None:
- files_filt.append(f)
- if len(files_filt) == 0:
- logger.info("None found.")
- return []
- # 2. Delete files, Makefile entries and other occurrences
- files_deleted = []
- ed = CMakeFileEditor('{}/CMakeLists.txt'.format(path))
- yes = self.info['yes']
- for f in files_filt:
- b = os.path.basename(f)
- if not yes and self.cli:
- ans = cli_input("Really delete {}? [Y/n/a/q]: ".format(f)).lower().strip()
- if ans == 'a':
- yes = True
- if ans == 'q':
- sys.exit(0)
- if ans == 'n':
- continue
- files_deleted.append(b)
- logger.info("Deleting {}.".format(f))
- self.scm.remove_file(f)
- os.unlink(f)
- logger.info("Deleting occurrences of {} from {}/CMakeLists.txt...".format(b, path))
- for var in makefile_vars:
- ed.remove_value(var, b)
- if cmakeedit_func is not None:
- cmakeedit_func(b, ed)
- ed.write()
-        self.scm.mark_files_updated(('{}/CMakeLists.txt'.format(path),))
- return files_deleted
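
Step 1 of _run_subdir() (building the filtered file list) as a standalone helper, for reference (the example glob patterns and block name are illustrative):

    import glob
    import os
    import re

    def matching_files(path, globs, pattern):
        """Glob 'path' and keep files whose basename matches 'pattern'."""
        files = []
        for g in globs:
            files += sorted(glob.glob('{}/{}'.format(path, g)))
        return [f for f in files if re.search(pattern, os.path.basename(f))]

    # Example: matching_files('lib', ('*.cc', '*.h'), r'my_block')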
diff --git a/gr-utils/python/modtool/core/update.py b/gr-utils/python/modtool/core/update.py
deleted file mode 100644
index 2d3af1e7e0..0000000000
--- a/gr-utils/python/modtool/core/update.py
+++ /dev/null
@@ -1,101 +0,0 @@
-#
-# Copyright 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Module to convert XML bindings to YAML bindings """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import os
-import re
-import glob
-import logging
-
-from .base import ModTool, ModToolException
-from ..tools import get_modname
-
-logger = logging.getLogger(__name__)
-
-
-def get_xml_candidates():
- """ Returns a list of XML candidates for update """
- xml_candidates = []
- xml_files = [x for x in glob.glob1("grc", "*.xml")]
- mod_name = get_modname()
- for candidate in xml_files:
- candidate = os.path.splitext(candidate)[0]
- candidate = candidate.split(mod_name + "_", 1)[-1]
- xml_candidates.append(candidate)
- return xml_candidates
-
-
-class ModToolUpdate(ModTool):
- """ Update the grc bindings for a block """
- name = 'update'
- description = 'Update the grc bindings for a block'
-
- def __init__(self, blockname=None, complete=False, include_blacklisted=False, **kwargs):
- ModTool.__init__(self, blockname, **kwargs)
- self.info['complete'] = complete
- self.info['include_blacklisted'] = include_blacklisted
-
-
- def validate(self):
- """ Validates the arguments """
- ModTool._validate(self)
- if self.info['complete']:
- return
- if not self.info['blockname'] or self.info['blockname'].isspace():
- raise ModToolException('Block name not specified!')
- block_candidates = get_xml_candidates()
- if self.info['blockname'] not in block_candidates:
- choices = [x for x in block_candidates if self.info['blockname'] in x]
- if len(choices) > 0:
- print("Suggested alternatives: "+str(choices))
- raise ModToolException("The XML bindings does not exists!")
-
- def run(self):
- from gnuradio.grc.converter import Converter
- if not self.cli:
- self.validate()
- logger.warning("Warning: This is an experimental feature. Please verify the bindings.")
- module_name = self.info['modname']
- path = './grc/'
- conv = Converter(path, path)
- if self.info['complete']:
- blocks = get_xml_candidates()
- else:
- blocks = [self.info['blockname']]
- for blockname in blocks:
- xml_file = "{}_{}.xml".format(module_name, blockname)
- yml_file = "{}_{}.block.yml".format(module_name, blockname)
- if not conv.load_block_xml(path+xml_file, self.info["include_blacklisted"]):
- continue
- logger.info("Converted {} to {}".format(xml_file, yml_file))
- os.remove(path+xml_file)
- nsubs = self._run_cmakelists(xml_file, yml_file)
- if nsubs > 1:
- logger.warning("Changed more than expected for the block '%s' in the CMakeLists.txt. "
- "Please verify the CMakeLists manually.", blockname)
- elif nsubs == 0:
- logger.warning("No entry found for the block '%s' in the CMakeLists.txt. "
- 'Please verify the CMakeLists manually.', blockname)
- else:
- logger.info('Updated the CMakeLists.txt')
-
- def _run_cmakelists(self, to_remove, to_add):
- """ Changes in the CMakeLists """
- filename = './grc/CMakeLists.txt'
- with open(filename) as f:
- cfile = f.read()
- (cfile, nsubs) = re.subn(to_remove, to_add, cfile)
- with open(filename, 'w') as f:
- f.write(cfile)
- self.scm.mark_file_updated(filename)
- return nsubs
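
A hypothetical programmatic invocation of ModToolUpdate, for illustration only; the import path and the assumption that validate()/run() perform all required setup are not confirmed by this file, and in practice the class is driven through the 'gr_modtool update' CLI from the module root:

    from gnuradio.modtool.core import ModToolUpdate

    # Run from the root of the out-of-tree module, as 'gr_modtool update' would.
    updater = ModToolUpdate(blockname='square_ff')   # block name is an example
    updater.validate()
    updater.run()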
diff --git a/gr-utils/python/modtool/modtool.conf.in b/gr-utils/python/modtool/modtool.conf.in
deleted file mode 100644
index 8b3df1b9cf..0000000000
--- a/gr-utils/python/modtool/modtool.conf.in
+++ /dev/null
@@ -1,6 +0,0 @@
-# This file contains system wide configuration data for GNU Radio.
-# You may override any setting on a per-user basis by editing
-# ~/.gnuradio/config.conf
-
-[modtool]
-newmod_path = @newmoddir@
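
The generated modtool.conf is a plain INI file. Read with the standard library it looks like the sketch below; GNU Radio has its own prefs machinery, so this only illustrates the file format, and the path is an example:

    from configparser import ConfigParser

    config = ConfigParser()
    config.read('/usr/local/etc/gnuradio/conf.d/modtool.conf')  # example location
    newmod_path = config.get('modtool', 'newmod_path', fallback=None)
    print(newmod_path)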
diff --git a/gr-utils/python/modtool/templates/CMakeLists.txt b/gr-utils/python/modtool/templates/CMakeLists.txt
deleted file mode 100644
index 80b9a8476c..0000000000
--- a/gr-utils/python/modtool/templates/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-
-include(GrPython)
-
-GR_PYTHON_INSTALL(FILES
- __init__.py
- templates.py
- DESTINATION ${GR_PYTHON_DIR}/gnuradio/modtool/templates
-)
-
-set(GR_PKG_MODTOOL_DATA_DIR ${GR_PKG_DATA_DIR}/modtool/templates)
-install(DIRECTORY gr-newmod
- DESTINATION ${GR_PKG_MODTOOL_DATA_DIR}
-)
diff --git a/gr-utils/python/modtool/templates/__init__.py b/gr-utils/python/modtool/templates/__init__.py
deleted file mode 100644
index 3c185c93e1..0000000000
--- a/gr-utils/python/modtool/templates/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Copyright 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-from .templates import Templates
diff --git a/gr-utils/python/modtool/templates/gr-newmod/CMakeLists.txt b/gr-utils/python/modtool/templates/gr-newmod/CMakeLists.txt
deleted file mode 100644
index 5f59fb2986..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/CMakeLists.txt
+++ /dev/null
@@ -1,156 +0,0 @@
-# Copyright 2011,2012,2014,2016,2018 Free Software Foundation, Inc.
-#
-# This file was generated by gr_modtool, a tool from the GNU Radio framework
-# This file is a part of gr-howto
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-
-########################################################################
-# Project setup
-########################################################################
-cmake_minimum_required(VERSION 3.8)
-project(gr-howto CXX C)
-enable_testing()
-
-# Install to PyBOMBS target prefix if defined
-if(DEFINED ENV{PYBOMBS_PREFIX})
- set(CMAKE_INSTALL_PREFIX $ENV{PYBOMBS_PREFIX})
- message(STATUS "PyBOMBS installed GNU Radio. Setting CMAKE_INSTALL_PREFIX to $ENV{PYBOMBS_PREFIX}")
-endif()
-
-# Select the release build type by default to get optimization flags
-if(NOT CMAKE_BUILD_TYPE)
- set(CMAKE_BUILD_TYPE "Release")
- message(STATUS "Build type not specified: defaulting to release.")
-endif(NOT CMAKE_BUILD_TYPE)
-set(CMAKE_BUILD_TYPE ${CMAKE_BUILD_TYPE} CACHE STRING "")
-
-# Make sure our local CMake Modules path comes first
-list(INSERT CMAKE_MODULE_PATH 0 ${CMAKE_SOURCE_DIR}/cmake/Modules)
-
-# Set the version information here
-set(VERSION_MAJOR 1)
-set(VERSION_API 0)
-set(VERSION_ABI 0)
-set(VERSION_PATCH git)
-
-cmake_policy(SET CMP0011 NEW)
-
-# Enable generation of compile_commands.json for code completion engines
-set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
-
-########################################################################
-# Compiler specific setup
-########################################################################
-if((CMAKE_CXX_COMPILER_ID MATCHES "Clang" OR
- CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
- AND NOT WIN32)
- #http://gcc.gnu.org/wiki/Visibility
- add_definitions(-fvisibility=hidden)
-endif()
-
-IF(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
- SET(CMAKE_CXX_STANDARD 11)
-ELSEIF(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
- SET(CMAKE_CXX_STANDARD 11)
-ELSEIF(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
- SET(CMAKE_CXX_STANDARD 11)
-ELSE()
- message(WARNING "C++ standard could not be set because compiler is not GNU, Clang or MSVC.")
-ENDIF()
-
-IF(CMAKE_C_COMPILER_ID STREQUAL "GNU")
- SET(CMAKE_C_STANDARD 11)
-ELSEIF(CMAKE_C_COMPILER_ID MATCHES "Clang")
- SET(CMAKE_C_STANDARD 11)
-ELSEIF(CMAKE_C_COMPILER_ID STREQUAL "MSVC")
- SET(CMAKE_C_STANDARD 11)
-ELSE()
- message(WARNING "C standard could not be set because compiler is not GNU, Clang or MSVC.")
-ENDIF()
-
-########################################################################
-# Install directories
-########################################################################
-find_package(Gnuradio "3.9" REQUIRED)
-include(GrVersion)
-
-include(GrPlatform) #define LIB_SUFFIX
-
-if(NOT CMAKE_MODULES_DIR)
- set(CMAKE_MODULES_DIR lib${LIB_SUFFIX}/cmake)
-endif(NOT CMAKE_MODULES_DIR)
-
-set(GR_INCLUDE_DIR include/howto)
-set(GR_CMAKE_DIR ${CMAKE_MODULES_DIR}/howto)
-set(GR_PKG_DATA_DIR ${GR_DATA_DIR}/${CMAKE_PROJECT_NAME})
-set(GR_PKG_DOC_DIR ${GR_DOC_DIR}/${CMAKE_PROJECT_NAME})
-set(GR_PKG_CONF_DIR ${GR_CONF_DIR}/${CMAKE_PROJECT_NAME}/conf.d)
-set(GR_PKG_LIBEXEC_DIR ${GR_LIBEXEC_DIR}/${CMAKE_PROJECT_NAME})
-
-########################################################################
-# On Apple only, set install name and use rpath correctly, if not already set
-########################################################################
-if(APPLE)
- if(NOT CMAKE_INSTALL_NAME_DIR)
- set(CMAKE_INSTALL_NAME_DIR
- ${CMAKE_INSTALL_PREFIX}/${GR_LIBRARY_DIR} CACHE
- PATH "Library Install Name Destination Directory" FORCE)
- endif(NOT CMAKE_INSTALL_NAME_DIR)
- if(NOT CMAKE_INSTALL_RPATH)
- set(CMAKE_INSTALL_RPATH
- ${CMAKE_INSTALL_PREFIX}/${GR_LIBRARY_DIR} CACHE
- PATH "Library Install RPath" FORCE)
- endif(NOT CMAKE_INSTALL_RPATH)
- if(NOT CMAKE_BUILD_WITH_INSTALL_RPATH)
- set(CMAKE_BUILD_WITH_INSTALL_RPATH ON CACHE
- BOOL "Do Build Using Library Install RPath" FORCE)
- endif(NOT CMAKE_BUILD_WITH_INSTALL_RPATH)
-endif(APPLE)
-
-########################################################################
-# Find gnuradio build dependencies
-########################################################################
-find_package(Doxygen)
-
-########################################################################
-# Setup doxygen option
-########################################################################
-if(DOXYGEN_FOUND)
- option(ENABLE_DOXYGEN "Build docs using Doxygen" ON)
-else(DOXYGEN_FOUND)
- option(ENABLE_DOXYGEN "Build docs using Doxygen" OFF)
-endif(DOXYGEN_FOUND)
-
-########################################################################
-# Create uninstall target
-########################################################################
-configure_file(
- ${CMAKE_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in
- ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake
-@ONLY)
-
-add_custom_target(uninstall
- ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake
- )
-
-
-########################################################################
-# Add subdirectories
-########################################################################
-add_subdirectory(include/howto)
-add_subdirectory(lib)
-add_subdirectory(apps)
-add_subdirectory(docs)
-add_subdirectory(swig)
-add_subdirectory(python)
-add_subdirectory(grc)
-
-########################################################################
-# Install cmake search helper for this library
-########################################################################
-
-install(FILES cmake/Modules/howtoConfig.cmake
- DESTINATION ${CMAKE_MODULES_DIR}/howto
-)
diff --git a/gr-utils/python/modtool/templates/gr-newmod/MANIFEST.md b/gr-utils/python/modtool/templates/gr-newmod/MANIFEST.md
deleted file mode 100644
index dce8fbd5e8..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/MANIFEST.md
+++ /dev/null
@@ -1,17 +0,0 @@
-title: The HOWTO OOT Module
-brief: Short description of gr-howto
-tags: # Tags are arbitrary, but look at CGRAN what other authors are using
- - sdr
-author:
- - Author Name <authors@email.address>
-copyright_owner:
- - Copyright Owner 1
-license:
-gr_supported_version: # Put a comma separated list of supported GR versions here
-#repo: # Put the URL of the repository here, or leave blank for default
-#website: <module_website> # If you have a separate project website, put it here
-#icon: <icon_url> # Put a URL to a square image here that will be used as an icon on CGRAN
----
-A longer, multi-line description of gr-howto.
-You may use some *basic* Markdown here.
-If left empty, it will try to find a README file instead.
diff --git a/gr-utils/python/modtool/templates/gr-newmod/apps/CMakeLists.txt b/gr-utils/python/modtool/templates/gr-newmod/apps/CMakeLists.txt
deleted file mode 100644
index 0e1dfaee15..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/apps/CMakeLists.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-# Copyright 2011 Free Software Foundation, Inc.
-#
-# This file was generated by gr_modtool, a tool from the GNU Radio framework
-# This file is a part of gr-howto
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-
-include(GrPython)
-
-GR_PYTHON_INSTALL(
- PROGRAMS
- DESTINATION bin
-)
diff --git a/gr-utils/python/modtool/templates/gr-newmod/cmake/Modules/CMakeParseArgumentsCopy.cmake b/gr-utils/python/modtool/templates/gr-newmod/cmake/Modules/CMakeParseArgumentsCopy.cmake
deleted file mode 100644
index 66016cb2ff..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/cmake/Modules/CMakeParseArgumentsCopy.cmake
+++ /dev/null
@@ -1,138 +0,0 @@
-# CMAKE_PARSE_ARGUMENTS(<prefix> <options> <one_value_keywords> <multi_value_keywords> args...)
-#
-# CMAKE_PARSE_ARGUMENTS() is intended to be used in macros or functions for
-# parsing the arguments given to that macro or function.
-# It processes the arguments and defines a set of variables which hold the
-# values of the respective options.
-#
-# The <options> argument contains all options for the respective macro,
-# i.e. keywords which can be used when calling the macro without any value
-# following, like e.g. the OPTIONAL keyword of the install() command.
-#
-# The <one_value_keywords> argument contains all keywords for this macro
-# which are followed by one value, like e.g. DESTINATION keyword of the
-# install() command.
-#
-# The <multi_value_keywords> argument contains all keywords for this macro
-# which can be followed by more than one value, like e.g. the TARGETS or
-# FILES keywords of the install() command.
-#
-# When done, CMAKE_PARSE_ARGUMENTS() will have defined for each of the
-# keywords listed in <options>, <one_value_keywords> and
-# <multi_value_keywords> a variable composed of the given <prefix>
-# followed by "_" and the name of the respective keyword.
-# These variables will then hold the respective value from the argument list.
-# For the <options> keywords this will be TRUE or FALSE.
-#
-# All remaining arguments are collected in a variable
-# <prefix>_UNPARSED_ARGUMENTS, this can be checked afterwards to see whether
-# your macro was called with unrecognized parameters.
-#
-# As an example, here is a my_install() macro, which takes arguments similar to
-# those of the real install() command:
-#
-# function(MY_INSTALL)
-# set(options OPTIONAL FAST)
-# set(oneValueArgs DESTINATION RENAME)
-# set(multiValueArgs TARGETS CONFIGURATIONS)
-# cmake_parse_arguments(MY_INSTALL "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} )
-# ...
-#
-# Assume my_install() has been called like this:
-# my_install(TARGETS foo bar DESTINATION bin OPTIONAL blub)
-#
-# After the cmake_parse_arguments() call the macro will have set the following
-# variables:
-# MY_INSTALL_OPTIONAL = TRUE
-# MY_INSTALL_FAST = FALSE (this option was not used when calling my_install())
-# MY_INSTALL_DESTINATION = "bin"
-# MY_INSTALL_RENAME = "" (was not used)
-# MY_INSTALL_TARGETS = "foo;bar"
-# MY_INSTALL_CONFIGURATIONS = "" (was not used)
-# MY_INSTALL_UNPARSED_ARGUMENTS = "blub" (no value expected after "OPTIONAL")
-#
-# You can then continue and process these variables.
-#
-# Keywords terminate lists of values, e.g. if directly after a one_value_keyword
-# another recognized keyword follows, this is interpreted as the beginning of
-# the new option.
-# E.g. my_install(TARGETS foo DESTINATION OPTIONAL) would not set
-# MY_INSTALL_DESTINATION to "OPTIONAL"; instead, MY_INSTALL_DESTINATION would
-# be empty and MY_INSTALL_OPTIONAL would be set to TRUE.
-
-#=============================================================================
-# Copyright 2010 Alexander Neundorf <neundorf@kde.org>
-#
-# Distributed under the OSI-approved BSD License (the "License");
-# see accompanying file Copyright.txt for details.
-#
-# This software is distributed WITHOUT ANY WARRANTY; without even the
-# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the License for more information.
-#=============================================================================
-# (To distribute this file outside of CMake, substitute the full
-# License text for the above reference.)
-
-
-if(__CMAKE_PARSE_ARGUMENTS_INCLUDED)
- return()
-endif()
-set(__CMAKE_PARSE_ARGUMENTS_INCLUDED TRUE)
-
-
-function(CMAKE_PARSE_ARGUMENTS prefix _optionNames _singleArgNames _multiArgNames)
- # first set all result variables to empty/FALSE
- foreach(arg_name ${_singleArgNames} ${_multiArgNames})
- set(${prefix}_${arg_name})
- endforeach(arg_name)
-
- foreach(option ${_optionNames})
- set(${prefix}_${option} FALSE)
- endforeach(option)
-
- set(${prefix}_UNPARSED_ARGUMENTS)
-
- set(insideValues FALSE)
- set(currentArgName)
-
- # now iterate over all arguments and fill the result variables
- foreach(currentArg ${ARGN})
- list(FIND _optionNames "${currentArg}" optionIndex) # ... then this marks the end of the arguments belonging to this keyword
- list(FIND _singleArgNames "${currentArg}" singleArgIndex) # ... then this marks the end of the arguments belonging to this keyword
- list(FIND _multiArgNames "${currentArg}" multiArgIndex) # ... then this marks the end of the arguments belonging to this keyword
-
- if(${optionIndex} EQUAL -1 AND ${singleArgIndex} EQUAL -1 AND ${multiArgIndex} EQUAL -1)
- if(insideValues)
- if("${insideValues}" STREQUAL "SINGLE")
- set(${prefix}_${currentArgName} ${currentArg})
- set(insideValues FALSE)
- elseif("${insideValues}" STREQUAL "MULTI")
- list(APPEND ${prefix}_${currentArgName} ${currentArg})
- endif()
- else(insideValues)
- list(APPEND ${prefix}_UNPARSED_ARGUMENTS ${currentArg})
- endif(insideValues)
- else()
- if(NOT ${optionIndex} EQUAL -1)
- set(${prefix}_${currentArg} TRUE)
- set(insideValues FALSE)
- elseif(NOT ${singleArgIndex} EQUAL -1)
- set(currentArgName ${currentArg})
- set(${prefix}_${currentArgName})
- set(insideValues "SINGLE")
- elseif(NOT ${multiArgIndex} EQUAL -1)
- set(currentArgName ${currentArg})
- set(${prefix}_${currentArgName})
- set(insideValues "MULTI")
- endif()
- endif()
-
- endforeach(currentArg)
-
- # propagate the result variables to the caller:
- foreach(arg_name ${_singleArgNames} ${_multiArgNames} ${_optionNames})
- set(${prefix}_${arg_name} ${${prefix}_${arg_name}} PARENT_SCOPE)
- endforeach(arg_name)
- set(${prefix}_UNPARSED_ARGUMENTS ${${prefix}_UNPARSED_ARGUMENTS} PARENT_SCOPE)
-
-endfunction(CMAKE_PARSE_ARGUMENTS)
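
A minimal usage sketch of the macro removed above, to make the keyword handling
concrete. The my_install() example and its keywords are the hypothetical ones
from the header comment; this snippet is illustrative and not part of the
deleted file:

    include(CMakeParseArguments)

    macro(my_install)
        set(options OPTIONAL FAST)
        set(oneValueArgs DESTINATION RENAME)
        set(multiValueArgs TARGETS CONFIGURATIONS)
        cmake_parse_arguments(MY_INSTALL
            "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})

        # With my_install(TARGETS foo DESTINATION OPTIONAL):
        #   MY_INSTALL_TARGETS     = "foo"
        #   MY_INSTALL_DESTINATION = "" (OPTIONAL is a keyword, so it ends the value)
        #   MY_INSTALL_OPTIONAL    = TRUE
        message(STATUS "DESTINATION='${MY_INSTALL_DESTINATION}' OPTIONAL=${MY_INSTALL_OPTIONAL}")
    endmacro()

    my_install(TARGETS foo DESTINATION OPTIONAL)
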
diff --git a/gr-utils/python/modtool/templates/gr-newmod/cmake/Modules/howtoConfig.cmake b/gr-utils/python/modtool/templates/gr-newmod/cmake/Modules/howtoConfig.cmake
deleted file mode 100644
index 786d8242b4..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/cmake/Modules/howtoConfig.cmake
+++ /dev/null
@@ -1,31 +0,0 @@
-INCLUDE(FindPkgConfig)
-PKG_CHECK_MODULES(PC_HOWTO howto)
-
-FIND_PATH(
- HOWTO_INCLUDE_DIRS
- NAMES howto/api.h
- HINTS $ENV{HOWTO_DIR}/include
- ${PC_HOWTO_INCLUDEDIR}
- PATHS ${CMAKE_INSTALL_PREFIX}/include
- /usr/local/include
- /usr/include
-)
-
-FIND_LIBRARY(
- HOWTO_LIBRARIES
- NAMES gnuradio-howto
- HINTS $ENV{HOWTO_DIR}/lib
- ${PC_HOWTO_LIBDIR}
- PATHS ${CMAKE_INSTALL_PREFIX}/lib
- ${CMAKE_INSTALL_PREFIX}/lib64
- /usr/local/lib
- /usr/local/lib64
- /usr/lib
- /usr/lib64
- )
-
-include("${CMAKE_CURRENT_LIST_DIR}/howtoTarget.cmake")
-
-INCLUDE(FindPackageHandleStandardArgs)
-FIND_PACKAGE_HANDLE_STANDARD_ARGS(HOWTO DEFAULT_MSG HOWTO_LIBRARIES HOWTO_INCLUDE_DIRS)
-MARK_AS_ADVANCED(HOWTO_LIBRARIES HOWTO_INCLUDE_DIRS)
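
A hedged sketch of how a downstream project could consume the removed package
config above, once it is installed where find_package() can locate it. The
project and target names here are illustrative assumptions; only the HOWTO_*
variables come from the config file itself:

    cmake_minimum_required(VERSION 3.8)
    project(howto_consumer CXX)

    # loads the howtoConfig.cmake shown above
    find_package(howto REQUIRED)

    add_executable(consumer main.cc)
    target_include_directories(consumer PRIVATE ${HOWTO_INCLUDE_DIRS})
    target_link_libraries(consumer ${HOWTO_LIBRARIES})
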
diff --git a/gr-utils/python/modtool/templates/gr-newmod/cmake/Modules/targetConfig.cmake.in b/gr-utils/python/modtool/templates/gr-newmod/cmake/Modules/targetConfig.cmake.in
deleted file mode 100644
index 4a1fb312d7..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/cmake/Modules/targetConfig.cmake.in
+++ /dev/null
@@ -1,14 +0,0 @@
-# Copyright 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-
-include(CMakeFindDependencyMacro)
-
-set(target_deps "@TARGET_DEPENDENCIES@")
-foreach(dep IN LISTS target_deps)
- find_dependency(${dep})
-endforeach()
-include("${CMAKE_CURRENT_LIST_DIR}/@TARGET@Targets.cmake")
diff --git a/gr-utils/python/modtool/templates/gr-newmod/cmake/cmake_uninstall.cmake.in b/gr-utils/python/modtool/templates/gr-newmod/cmake/cmake_uninstall.cmake.in
deleted file mode 100644
index 9ae1ae4bd6..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/cmake/cmake_uninstall.cmake.in
+++ /dev/null
@@ -1,32 +0,0 @@
-# http://www.vtk.org/Wiki/CMake_FAQ#Can_I_do_.22make_uninstall.22_with_CMake.3F
-
-IF(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt")
- MESSAGE(FATAL_ERROR "Cannot find install manifest: \"@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt\"")
-ENDIF(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt")
-
-FILE(READ "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt" files)
-STRING(REGEX REPLACE "\n" ";" files "${files}")
-FOREACH(file ${files})
- MESSAGE(STATUS "Uninstalling \"$ENV{DESTDIR}${file}\"")
- IF(EXISTS "$ENV{DESTDIR}${file}")
- EXEC_PROGRAM(
- "@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\""
- OUTPUT_VARIABLE rm_out
- RETURN_VALUE rm_retval
- )
- IF(NOT "${rm_retval}" STREQUAL 0)
- MESSAGE(FATAL_ERROR "Problem when removing \"$ENV{DESTDIR}${file}\"")
- ENDIF(NOT "${rm_retval}" STREQUAL 0)
- ELSEIF(IS_SYMLINK "$ENV{DESTDIR}${file}")
- EXEC_PROGRAM(
- "@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\""
- OUTPUT_VARIABLE rm_out
- RETURN_VALUE rm_retval
- )
- IF(NOT "${rm_retval}" STREQUAL 0)
- MESSAGE(FATAL_ERROR "Problem when removing \"$ENV{DESTDIR}${file}\"")
- ENDIF(NOT "${rm_retval}" STREQUAL 0)
- ELSE(EXISTS "$ENV{DESTDIR}${file}")
- MESSAGE(STATUS "File \"$ENV{DESTDIR}${file}\" does not exist.")
- ENDIF(EXISTS "$ENV{DESTDIR}${file}")
-ENDFOREACH(file)
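
The script above is normally wired into the top-level build following the CMake
FAQ entry it links to; a minimal sketch of that hook-up, with paths assumed for
illustration:

    configure_file(
        "${CMAKE_CURRENT_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in"
        "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake"
        @ONLY)

    add_custom_target(uninstall
        COMMAND ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake)
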
diff --git a/gr-utils/python/modtool/templates/gr-newmod/docs/CMakeLists.txt b/gr-utils/python/modtool/templates/gr-newmod/docs/CMakeLists.txt
deleted file mode 100644
index 8b76ace40b..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/docs/CMakeLists.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright 2011 Free Software Foundation, Inc.
-#
-# This file was generated by gr_modtool, a tool from the GNU Radio framework
-# This file is a part of gr-howto
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-
-########################################################################
-# Setup dependencies
-########################################################################
-find_package(Doxygen)
-
-########################################################################
-# Begin conditional configuration
-########################################################################
-if(ENABLE_DOXYGEN)
-
-########################################################################
-# Add subdirectories
-########################################################################
-add_subdirectory(doxygen)
-
-endif(ENABLE_DOXYGEN)
diff --git a/gr-utils/python/modtool/templates/gr-newmod/docs/README.howto b/gr-utils/python/modtool/templates/gr-newmod/docs/README.howto
deleted file mode 100644
index b29ce00579..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/docs/README.howto
+++ /dev/null
@@ -1,11 +0,0 @@
-This is the howto-write-a-block package, meant as a guide to building
-out-of-tree packages. To use the howto blocks, the Python namespace
-is 'howto', which is imported as:
-
- import howto
-
-See the Doxygen documentation for details about the blocks available
-in this package. A quick listing of the details can be found in Python
-after importing by using:
-
- help(howto)
diff --git a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/CMakeLists.txt b/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/CMakeLists.txt
deleted file mode 100644
index a80d07ea4a..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/CMakeLists.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2011 Free Software Foundation, Inc.
-#
-# This file was generated by gr_modtool, a tool from the GNU Radio framework
-# This file is a part of gr-howto
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-
-########################################################################
-# Create the doxygen configuration file
-########################################################################
-file(TO_NATIVE_PATH ${CMAKE_SOURCE_DIR} top_srcdir)
-file(TO_NATIVE_PATH ${CMAKE_BINARY_DIR} top_builddir)
-file(TO_NATIVE_PATH ${CMAKE_SOURCE_DIR} abs_top_srcdir)
-file(TO_NATIVE_PATH ${CMAKE_BINARY_DIR} abs_top_builddir)
-
-set(HAVE_DOT ${DOXYGEN_DOT_FOUND})
-set(enable_html_docs YES)
-set(enable_latex_docs NO)
-set(enable_xml_docs YES)
-
-configure_file(
- ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in
- ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile
-    @ONLY)
-
-set(BUILT_DIRS ${CMAKE_CURRENT_BINARY_DIR}/xml ${CMAKE_CURRENT_BINARY_DIR}/html)
-
-########################################################################
-# Make and install doxygen docs
-########################################################################
-add_custom_command(
- OUTPUT ${BUILT_DIRS}
- COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMENT "Generating documentation with doxygen"
-)
-
-add_custom_target(doxygen_target ALL DEPENDS ${BUILT_DIRS})
-
-install(DIRECTORY ${BUILT_DIRS} DESTINATION ${GR_PKG_DOC_DIR})
diff --git a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/Doxyfile.in b/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/Doxyfile.in
deleted file mode 100644
index 55816e8252..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/Doxyfile.in
+++ /dev/null
@@ -1,1896 +0,0 @@
-# Doxyfile 1.8.4
-
-# This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project.
-#
-# All text after a double hash (##) is considered a comment and is placed
-# in front of the TAG it precedes.
-# All text after a hash (#) is considered a comment and will be ignored.
-# The format is:
-# TAG = value [value, ...]
-# For lists, items can also be appended using:
-# TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (" ").
-
-#---------------------------------------------------------------------------
-# Project related configuration options
-#---------------------------------------------------------------------------
-
-# This tag specifies the encoding used for all characters in the config file
-# that follow. The default is UTF-8 which is also the encoding used for all
-# text before the first occurrence of this tag. Doxygen uses libiconv (or the
-# iconv built into libc) for the transcoding. See
-# http://www.gnu.org/software/libiconv for the list of possible encodings.
-
-DOXYFILE_ENCODING = UTF-8
-
-# The PROJECT_NAME tag is a single word (or sequence of words) that should
-# identify the project. Note that if you do not use Doxywizard you need
-# to put quotes around the project name if it contains spaces.
-
-PROJECT_NAME = "GNU Radio's HOWTO Package"
-
-# The PROJECT_NUMBER tag can be used to enter a project or revision number.
-# This could be handy for archiving the generated documentation or
-# if some version control system is used.
-
-PROJECT_NUMBER =
-
-# Using the PROJECT_BRIEF tag one can provide an optional one line description
-# for a project that appears at the top of each page and should give the viewer
-# a quick idea about the purpose of the project. Keep the description short.
-
-PROJECT_BRIEF =
-
-# With the PROJECT_LOGO tag one can specify a logo or icon that is
-# included in the documentation. The maximum height of the logo should not
-# exceed 55 pixels and the maximum width should not exceed 200 pixels.
-# Doxygen will copy the logo to the output directory.
-
-PROJECT_LOGO =
-
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
-# base path where the generated documentation will be put.
-# If a relative path is entered, it will be relative to the location
-# where doxygen was started. If left blank the current directory will be used.
-
-OUTPUT_DIRECTORY =
-
-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
-# 4096 sub-directories (in 2 levels) under the output directory of each output
-# format and will distribute the generated files over these directories.
-# Enabling this option can be useful when feeding doxygen a huge amount of
-# source files, where putting all generated files in the same directory would
-# otherwise cause performance problems for the file system.
-
-CREATE_SUBDIRS = NO
-
-# The OUTPUT_LANGUAGE tag is used to specify the language in which all
-# documentation generated by doxygen is written. Doxygen will use this
-# information to generate all constant output in the proper language.
-# The default language is English, other supported languages are:
-# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
-# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
-# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
-# messages), Korean, Korean-en, Latvian, Lithuanian, Norwegian, Macedonian,
-# Persian, Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic,
-# Slovak, Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
-
-OUTPUT_LANGUAGE = English
-
-# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
-# include brief member descriptions after the members that are listed in
-# the file and class documentation (similar to JavaDoc).
-# Set to NO to disable this.
-
-BRIEF_MEMBER_DESC = YES
-
-# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
-# the brief description of a member or function before the detailed description.
-# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
-# brief descriptions will be completely suppressed.
-
-REPEAT_BRIEF = YES
-
-# This tag implements a quasi-intelligent brief description abbreviator
-# that is used to form the text in various listings. Each string
-# in this list, if found as the leading text of the brief description, will be
-# stripped from the text and the result after processing the whole list, is
-# used as the annotated text. Otherwise, the brief description is used as-is.
-# If left blank, the following values are used ("$name" is automatically
-# replaced with the name of the entity): "The $name class" "The $name widget"
-# "The $name file" "is" "provides" "specifies" "contains"
-# "represents" "a" "an" "the"
-
-ABBREVIATE_BRIEF =
-
-# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
-# Doxygen will generate a detailed section even if there is only a brief
-# description.
-
-ALWAYS_DETAILED_SEC = YES
-
-# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
-# inherited members of a class in the documentation of that class as if those
-# members were ordinary class members. Constructors, destructors and assignment
-# operators of the base classes will not be shown.
-
-INLINE_INHERITED_MEMB = NO
-
-# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
-# path before file names in the file list and in the header files. If set
-# to NO the shortest path that makes the file name unique will be used.
-
-FULL_PATH_NAMES = NO
-
-# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
-# can be used to strip a user-defined part of the path. Stripping is
-# only done if one of the specified strings matches the left-hand part of
-# the path. The tag can be used to show relative paths in the file list.
-# If left blank the directory from which doxygen is run is used as the
-# path to strip. Note that you specify absolute paths here, but also
-# relative paths, which will be relative from the directory where doxygen is
-# started.
-
-STRIP_FROM_PATH =
-
-# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
-# the path mentioned in the documentation of a class, which tells
-# the reader which header file to include in order to use a class.
-# If left blank only the name of the header file containing the class
-# definition is used. Otherwise one should specify the include paths that
-# are normally passed to the compiler using the -I flag.
-
-STRIP_FROM_INC_PATH =
-
-# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
-# (but less readable) file names. This can be useful if your file system
-# doesn't support long names like on DOS, Mac, or CD-ROM.
-
-SHORT_NAMES = NO
-
-# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
-# will interpret the first line (until the first dot) of a JavaDoc-style
-# comment as the brief description. If set to NO, the JavaDoc
-# comments will behave just like regular Qt-style comments
-# (thus requiring an explicit @brief command for a brief description.)
-
-JAVADOC_AUTOBRIEF = NO
-
-# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
-# interpret the first line (until the first dot) of a Qt-style
-# comment as the brief description. If set to NO, the comments
-# will behave just like regular Qt-style comments (thus requiring
-# an explicit \brief command for a brief description.)
-
-QT_AUTOBRIEF = NO
-
-# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
-# treat a multi-line C++ special comment block (i.e. a block of //! or ///
-# comments) as a brief description. This used to be the default behaviour.
-# The new default is to treat a multi-line C++ comment block as a detailed
-# description. Set this tag to YES if you prefer the old behaviour instead.
-
-MULTILINE_CPP_IS_BRIEF = YES
-
-# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
-# member inherits the documentation from any documented member that it
-# re-implements.
-
-INHERIT_DOCS = YES
-
-# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
-# a new page for each member. If set to NO, the documentation of a member will
-# be part of the file/class/namespace that contains it.
-
-SEPARATE_MEMBER_PAGES = NO
-
-# The TAB_SIZE tag can be used to set the number of spaces in a tab.
-# Doxygen uses this value to replace tabs by spaces in code fragments.
-
-TAB_SIZE = 8
-
-# This tag can be used to specify a number of aliases that acts
-# as commands in the documentation. An alias has the form "name=value".
-# For example adding "sideeffect=\par Side Effects:\n" will allow you to
-# put the command \sideeffect (or @sideeffect) in the documentation, which
-# will result in a user-defined paragraph with heading "Side Effects:".
-# You can put \n's in the value part of an alias to insert newlines.
-
-ALIASES =
-
-# This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding
-# "class=itcl::class" will allow you to use the command class in the
-# itcl::class meaning.
-
-TCL_SUBST =
-
-# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
-# sources only. Doxygen will then generate output that is more tailored for C.
-# For instance, some of the names that are used will be different. The list
-# of all members will be omitted, etc.
-
-OPTIMIZE_OUTPUT_FOR_C = NO
-
-# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
-# sources only. Doxygen will then generate output that is more tailored for
-# Java. For instance, namespaces will be presented as packages, qualified
-# scopes will look different, etc.
-
-OPTIMIZE_OUTPUT_JAVA = NO
-
-# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
-# sources only. Doxygen will then generate output that is more tailored for
-# Fortran.
-
-OPTIMIZE_FOR_FORTRAN = NO
-
-# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
-# sources. Doxygen will then generate output that is tailored for
-# VHDL.
-
-OPTIMIZE_OUTPUT_VHDL = NO
-
-# Doxygen selects the parser to use depending on the extension of the files it
-# parses. With this tag you can assign which parser to use for a given
-# extension. Doxygen has a built-in mapping, but you can override or extend it
-# using this tag. The format is ext=language, where ext is a file extension,
-# and language is one of the parsers supported by doxygen: IDL, Java,
-# Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL, C,
-# C++. For instance to make doxygen treat .inc files as Fortran files (default
-# is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note
-# that for custom extensions you also need to set FILE_PATTERNS otherwise the
-# files are not read by doxygen.
-
-EXTENSION_MAPPING =
-
-# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all
-# comments according to the Markdown format, which allows for more readable
-# documentation. See http://daringfireball.net/projects/markdown/ for details.
-# The output of markdown processing is further processed by doxygen, so you
-# can mix doxygen, HTML, and XML commands with Markdown formatting.
-# Disable only in case of backward compatibilities issues.
-
-MARKDOWN_SUPPORT = YES
-
-# When enabled doxygen tries to link words that correspond to documented
-# classes, or namespaces to their corresponding documentation. Such a link can
-# be prevented in individual cases by putting a % sign in front of the word
-# or globally by setting AUTOLINK_SUPPORT to NO.
-
-AUTOLINK_SUPPORT = YES
-
-# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
-# to include (a tag file for) the STL sources as input, then you should
-# set this tag to YES in order to let doxygen match functions declarations and
-# definitions whose arguments contain STL classes (e.g. func(std::string); vs.
-# func(std::string) {}). This also makes the inheritance and collaboration
-# diagrams that involve STL classes more complete and accurate.
-
-BUILTIN_STL_SUPPORT = YES
-
-# If you use Microsoft's C++/CLI language, you should set this option to YES to
-# enable parsing support.
-
-CPP_CLI_SUPPORT = NO
-
-# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
-# Doxygen will parse them like normal C++ but will assume all classes use public
-# instead of private inheritance when no explicit protection keyword is present.
-
-SIP_SUPPORT = NO
-
-# For Microsoft's IDL there are propget and propput attributes to indicate
-# getter and setter methods for a property. Setting this option to YES (the
-# default) will make doxygen replace the get and set methods by a property in
-# the documentation. This will only work if the methods are indeed getting or
-# setting a simple type. If this is not the case, or you want to show the
-# methods anyway, you should set this option to NO.
-
-IDL_PROPERTY_SUPPORT = YES
-
-# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES, then doxygen will reuse the documentation of the first
-# member in the group (if any) for the other members of the group. By default
-# all members of a group must be documented explicitly.
-
-DISTRIBUTE_GROUP_DOC = NO
-
-# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
-# the same type (for instance a group of public functions) to be put as a
-# subgroup of that type (e.g. under the Public Functions section). Set it to
-# NO to prevent subgrouping. Alternatively, this can be done per class using
-# the \nosubgrouping command.
-
-SUBGROUPING = YES
-
-# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and
-# unions are shown inside the group in which they are included (e.g. using
-# @ingroup) instead of on a separate page (for HTML and Man pages) or
-# section (for LaTeX and RTF).
-
-INLINE_GROUPED_CLASSES = NO
-
-# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and
-# unions with only public data fields or simple typedef fields will be shown
-# inline in the documentation of the scope in which they are defined (i.e. file,
-# namespace, or group documentation), provided this scope is documented. If set
-# to NO (the default), structs, classes, and unions are shown on a separate
-# page (for HTML and Man pages) or section (for LaTeX and RTF).
-
-INLINE_SIMPLE_STRUCTS = NO
-
-# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
-# is documented as struct, union, or enum with the name of the typedef. So
-# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
-# with name TypeT. When disabled the typedef will appear as a member of a file,
-# namespace, or class. And the struct will be named TypeS. This can typically
-# be useful for C code in case the coding convention dictates that all compound
-# types are typedef'ed and only the typedef is referenced, never the tag name.
-
-TYPEDEF_HIDES_STRUCT = NO
-
-# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
-# cache is used to resolve symbols given their name and scope. Since this can
-# be an expensive process and often the same symbol appear multiple times in
-# the code, doxygen keeps a cache of pre-resolved symbols. If the cache is too
-# small doxygen will become slower. If the cache is too large, memory is wasted.
-# The cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid
-# range is 0..9, the default is 0, corresponding to a cache size of 2^16 = 65536
-# symbols.
-
-LOOKUP_CACHE_SIZE = 0
-
-#---------------------------------------------------------------------------
-# Build related configuration options
-#---------------------------------------------------------------------------
-
-# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
-# documentation are documented, even if no documentation was available.
-# Private class members and static file members will be hidden unless
-# the EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES
-
-EXTRACT_ALL = YES
-
-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
-# will be included in the documentation.
-
-EXTRACT_PRIVATE = NO
-
-# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
-# scope will be included in the documentation.
-
-EXTRACT_PACKAGE = NO
-
-# If the EXTRACT_STATIC tag is set to YES all static members of a file
-# will be included in the documentation.
-
-EXTRACT_STATIC = YES
-
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
-# defined locally in source files will be included in the documentation.
-# If set to NO only classes defined in header files are included.
-
-EXTRACT_LOCAL_CLASSES = YES
-
-# This flag is only useful for Objective-C code. When set to YES local
-# methods, which are defined in the implementation section but not in
-# the interface are included in the documentation.
-# If set to NO (the default) only methods in the interface are included.
-
-EXTRACT_LOCAL_METHODS = NO
-
-# If this flag is set to YES, the members of anonymous namespaces will be
-# extracted and appear in the documentation as a namespace called
-# 'anonymous_namespace{file}', where file will be replaced with the base
-# name of the file that contains the anonymous namespace. By default
-# anonymous namespaces are hidden.
-
-EXTRACT_ANON_NSPACES = NO
-
-# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
-# undocumented members of documented classes, files or namespaces.
-# If set to NO (the default) these members will be included in the
-# various overviews, but no documentation section is generated.
-# This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_MEMBERS = NO
-
-# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
-# undocumented classes that are normally visible in the class hierarchy.
-# If set to NO (the default) these classes will be included in the various
-# overviews. This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_CLASSES = NO
-
-# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
-# friend (class|struct|union) declarations.
-# If set to NO (the default) these declarations will be included in the
-# documentation.
-
-HIDE_FRIEND_COMPOUNDS = NO
-
-# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
-# documentation blocks found inside the body of a function.
-# If set to NO (the default) these blocks will be appended to the
-# function's detailed documentation block.
-
-HIDE_IN_BODY_DOCS = NO
-
-# The INTERNAL_DOCS tag determines if documentation
-# that is typed after a \internal command is included. If the tag is set
-# to NO (the default) then the documentation will be excluded.
-# Set it to YES to include the internal documentation.
-
-INTERNAL_DOCS = NO
-
-# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
-# file names in lower-case letters. If set to YES upper-case letters are also
-# allowed. This is useful if you have classes or files whose names only differ
-# in case and if your file system supports case sensitive file names. Windows
-# and Mac users are advised to set this option to NO.
-
-CASE_SENSE_NAMES = YES
-
-# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
-# will show members with their full class and namespace scopes in the
-# documentation. If set to YES the scope will be hidden.
-
-HIDE_SCOPE_NAMES = NO
-
-# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
-# will put a list of the files that are included by a file in the documentation
-# of that file.
-
-SHOW_INCLUDE_FILES = YES
-
-# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
-# will list include files with double quotes in the documentation
-# rather than with sharp brackets.
-
-FORCE_LOCAL_INCLUDES = NO
-
-# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
-# is inserted in the documentation for inline members.
-
-INLINE_INFO = YES
-
-# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
-# will sort the (detailed) documentation of file and class members
-# alphabetically by member name. If set to NO the members will appear in
-# declaration order.
-
-SORT_MEMBER_DOCS = YES
-
-# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
-# brief documentation of file, namespace and class members alphabetically
-# by member name. If set to NO (the default) the members will appear in
-# declaration order.
-
-SORT_BRIEF_DOCS = NO
-
-# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
-# will sort the (brief and detailed) documentation of class members so that
-# constructors and destructors are listed first. If set to NO (the default)
-# the constructors will appear in the respective orders defined by
-# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
-# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
-# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
-
-SORT_MEMBERS_CTORS_1ST = NO
-
-# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
-# hierarchy of group names into alphabetical order. If set to NO (the default)
-# the group names will appear in their defined order.
-
-SORT_GROUP_NAMES = NO
-
-# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
-# sorted by fully-qualified names, including namespaces. If set to
-# NO (the default), the class list will be sorted only by class name,
-# not including the namespace part.
-# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
-# Note: This option applies only to the class list, not to the
-# alphabetical list.
-
-SORT_BY_SCOPE_NAME = NO
-
-# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to
-# do proper type resolution of all parameters of a function it will reject a
-# match between the prototype and the implementation of a member function even
-# if there is only one candidate or it is obvious which candidate to choose
-# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
-# will still accept a match between prototype and implementation in such cases.
-
-STRICT_PROTO_MATCHING = NO
-
-# The GENERATE_TODOLIST tag can be used to enable (YES) or
-# disable (NO) the todo list. This list is created by putting \todo
-# commands in the documentation.
-
-GENERATE_TODOLIST = NO
-
-# The GENERATE_TESTLIST tag can be used to enable (YES) or
-# disable (NO) the test list. This list is created by putting \test
-# commands in the documentation.
-
-GENERATE_TESTLIST = NO
-
-# The GENERATE_BUGLIST tag can be used to enable (YES) or
-# disable (NO) the bug list. This list is created by putting \bug
-# commands in the documentation.
-
-GENERATE_BUGLIST = NO
-
-# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
-# disable (NO) the deprecated list. This list is created by putting
-# \deprecated commands in the documentation.
-
-GENERATE_DEPRECATEDLIST= NO
-
-# The ENABLED_SECTIONS tag can be used to enable conditional
-# documentation sections, marked by \if section-label ... \endif
-# and \cond section-label ... \endcond blocks.
-
-ENABLED_SECTIONS =
-
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
-# the initial value of a variable or macro consists of for it to appear in
-# the documentation. If the initializer consists of more lines than specified
-# here it will be hidden. Use a value of 0 to hide initializers completely.
-# The appearance of the initializer of individual variables and macros in the
-# documentation can be controlled using \showinitializer or \hideinitializer
-# command in the documentation regardless of this setting.
-
-MAX_INITIALIZER_LINES = 30
-
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
-# at the bottom of the documentation of classes and structs. If set to YES the
-# list will mention the files that were used to generate the documentation.
-
-SHOW_USED_FILES = YES
-
-# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
-# This will remove the Files entry from the Quick Index and from the
-# Folder Tree View (if specified). The default is YES.
-
-SHOW_FILES = YES
-
-# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
-# Namespaces page.
-# This will remove the Namespaces entry from the Quick Index
-# and from the Folder Tree View (if specified). The default is YES.
-
-SHOW_NAMESPACES = NO
-
-# The FILE_VERSION_FILTER tag can be used to specify a program or script that
-# doxygen should invoke to get the current version for each file (typically from
-# the version control system). Doxygen will invoke the program by executing (via
-# popen()) the command <command> <input-file>, where <command> is the value of
-# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
-# provided by doxygen. Whatever the program writes to standard output
-# is used as the file version. See the manual for examples.
-
-FILE_VERSION_FILTER =
-
-# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
-# by doxygen. The layout file controls the global structure of the generated
-# output files in an output format independent way. To create the layout file
-# that represents doxygen's defaults, run doxygen with the -l option.
-# You can optionally specify a file name after the option, if omitted
-# DoxygenLayout.xml will be used as the name of the layout file.
-
-LAYOUT_FILE =
-
-# The CITE_BIB_FILES tag can be used to specify one or more bib files
-# containing the references data. This must be a list of .bib files. The
-# .bib extension is automatically appended if omitted. Using this command
-# requires the bibtex tool to be installed. See also
-# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style
-# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this
-# feature you need bibtex and perl available in the search path. Do not use
-# file names with spaces; bibtex cannot handle them.
-
-CITE_BIB_FILES =
-
-#---------------------------------------------------------------------------
-# configuration options related to warning and progress messages
-#---------------------------------------------------------------------------
-
-# The QUIET tag can be used to turn on/off the messages that are generated
-# by doxygen. Possible values are YES and NO. If left blank NO is used.
-
-QUIET = YES
-
-# The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated by doxygen. Possible values are YES and NO. If left blank
-# NO is used.
-
-WARNINGS = YES
-
-# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
-# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
-# automatically be disabled.
-
-WARN_IF_UNDOCUMENTED = YES
-
-# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
-# potential errors in the documentation, such as not documenting some
-# parameters in a documented function, or documenting parameters that
-# don't exist or using markup commands wrongly.
-
-WARN_IF_DOC_ERROR = YES
-
-# The WARN_NO_PARAMDOC option can be enabled to get warnings for
-# functions that are documented, but have no documentation for their parameters
-# or return value. If set to NO (the default) doxygen will only warn about
-# wrong or incomplete parameter documentation, but not about the absence of
-# documentation.
-
-WARN_NO_PARAMDOC = NO
-
-# The WARN_FORMAT tag determines the format of the warning messages that
-# doxygen can produce. The string should contain the $file, $line, and $text
-# tags, which will be replaced by the file and line number from which the
-# warning originated and the warning text. Optionally the format may contain
-# $version, which will be replaced by the version of the file (if it could
-# be obtained via FILE_VERSION_FILTER)
-
-WARN_FORMAT = "$file:$line: $text "
-
-# The WARN_LOGFILE tag can be used to specify a file to which warning
-# and error messages should be written. If left blank the output is written
-# to stderr.
-
-WARN_LOGFILE =
-
-#---------------------------------------------------------------------------
-# configuration options related to the input files
-#---------------------------------------------------------------------------
-
-# The INPUT tag can be used to specify the files and/or directories that contain
-# documented source files. You may enter file names like "myfile.cpp" or
-# directories like "/usr/src/myproject". Separate the files or directories
-# with spaces.
-
-INPUT = "@top_srcdir@" \
- "@top_builddir@"
-
-# This tag can be used to specify the character encoding of the source files
-# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
-# also the default input encoding. Doxygen uses libiconv (or the iconv built
-# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
-# the list of possible encodings.
-
-INPUT_ENCODING = UTF-8
-
-# If the value of the INPUT tag contains directories, you can use the
-# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank the following patterns are tested:
-# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
-# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
-# *.f90 *.f *.for *.vhd *.vhdl
-
-FILE_PATTERNS = *.h \
- *.dox
-
-# The RECURSIVE tag can be used to specify whether or not subdirectories
-# should be searched for input files as well. Possible values are YES and NO.
-# If left blank NO is used.
-
-RECURSIVE = YES
-
-# The EXCLUDE tag can be used to specify files and/or directories that should be
-# excluded from the INPUT source files. This way you can easily exclude a
-# subdirectory from a directory tree whose root is specified with the INPUT tag.
-# Note that relative paths are relative to the directory from which doxygen is
-# run.
-
-EXCLUDE = @abs_top_builddir@/docs/doxygen/html \
- @abs_top_builddir@/docs/doxygen/xml \
- @abs_top_builddir@/docs/doxygen/other/doxypy.py \
- @abs_top_builddir@/_CPack_Packages \
- @abs_top_srcdir@/cmake
-
-# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
-# directories that are symbolic links (a Unix file system feature) are excluded
-# from the input.
-
-EXCLUDE_SYMLINKS = NO
-
-# If the value of the INPUT tag contains directories, you can use the
-# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
-# certain files from those directories. Note that the wildcards are matched
-# against the file with absolute path, so to exclude all test directories
-# for example use the pattern */test/*
-
-EXCLUDE_PATTERNS = */.deps/* \
- */.libs/* \
- */.svn/* \
- */CVS/* \
- */__init__.py \
- */qa_*.cc \
- */qa_*.h \
- */qa_*.py
-
-# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
-# (namespaces, classes, functions, etc.) that should be excluded from the
-# output. The symbol name can be a fully qualified name, a word, or if the
-# wildcard * is used, a substring. Examples: ANamespace, AClass,
-# AClass::ANamespace, ANamespace::*Test
-
-EXCLUDE_SYMBOLS = ad9862 \
- numpy \
- *swig* \
- *Swig* \
- *my_top_block* \
- *my_graph* \
- *app_top_block* \
- *am_rx_graph* \
- *_queue_watcher_thread* \
- *parse* \
- *MyFrame* \
- *MyApp* \
- *PyObject* \
- *wfm_rx_block* \
- *_sptr* \
- *debug* \
- *wfm_rx_sca_block* \
- *tv_rx_block* \
- *wxapt_rx_block* \
- *example_signal*
-
-# The EXAMPLE_PATH tag can be used to specify one or more files or
-# directories that contain example code fragments that are included (see
-# the \include command).
-
-EXAMPLE_PATH =
-
-# If the value of the EXAMPLE_PATH tag contains directories, you can use the
-# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank all files are included.
-
-EXAMPLE_PATTERNS =
-
-# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
-# searched for input files to be used with the \include or \dontinclude
-# commands irrespective of the value of the RECURSIVE tag.
-# Possible values are YES and NO. If left blank NO is used.
-
-EXAMPLE_RECURSIVE = NO
-
-# The IMAGE_PATH tag can be used to specify one or more files or
-# directories that contain image that are included in the documentation (see
-# the \image command).
-
-IMAGE_PATH =
-
-# The INPUT_FILTER tag can be used to specify a program that doxygen should
-# invoke to filter for each input file. Doxygen will invoke the filter program
-# by executing (via popen()) the command <filter> <input-file>, where <filter>
-# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
-# input file. Doxygen will then use the output that the filter program writes
-# to standard output.
-# If FILTER_PATTERNS is specified, this tag will be ignored.
-# Note that the filter must not add or remove lines; it is applied before the
-# code is scanned, but not when the output code is generated. If lines are added
-# or removed, the anchors will not be placed correctly.
-
-INPUT_FILTER =
-
-# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
-# basis.
-# Doxygen will compare the file name with each pattern and apply the
-# filter if there is a match.
-# The filters are a list of the form:
-# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
-# info on how filters are used. If FILTER_PATTERNS is empty or if
-# none of the patterns match the file name, INPUT_FILTER is applied.
-
-FILTER_PATTERNS = *.py="@top_srcdir@"/doc/doxygen/other/doxypy.py
-
-# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER) will be used to filter the input files when producing source
-# files to browse (i.e. when SOURCE_BROWSER is set to YES).
-
-FILTER_SOURCE_FILES = NO
-
-# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
-# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
-# and it is also possible to disable source filtering for a specific pattern
-# using *.ext= (so without naming a filter). This option only has effect when
-# FILTER_SOURCE_FILES is enabled.
-
-FILTER_SOURCE_PATTERNS =
-
-# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
-# is part of the input, its contents will be placed on the main page
-# (index.html). This can be useful if you have a project on, for instance,
-# GitHub and want to reuse the introduction page also for the doxygen output.
-
-USE_MDFILE_AS_MAINPAGE =
-
-#---------------------------------------------------------------------------
-# configuration options related to source browsing
-#---------------------------------------------------------------------------
-
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will
-# be generated. Documented entities will be cross-referenced with these sources.
-# Note: To get rid of all source code in the generated output, make sure also
-# VERBATIM_HEADERS is set to NO.
-
-SOURCE_BROWSER = NO
-
-# Setting the INLINE_SOURCES tag to YES will include the body
-# of functions and classes directly in the documentation.
-
-INLINE_SOURCES = NO
-
-# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
-# doxygen to hide any special comment blocks from generated source code
-# fragments. Normal C, C++ and Fortran comments will always remain visible.
-
-STRIP_CODE_COMMENTS = NO
-
-# If the REFERENCED_BY_RELATION tag is set to YES
-# then for each documented function all documented
-# functions referencing it will be listed.
-
-REFERENCED_BY_RELATION = YES
-
-# If the REFERENCES_RELATION tag is set to YES
-# then for each documented function all documented entities
-# called/used by that function will be listed.
-
-REFERENCES_RELATION = YES
-
-# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
-# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
-# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
-# link to the source code.
-# Otherwise they will link to the documentation.
-
-REFERENCES_LINK_SOURCE = YES
-
-# If the USE_HTAGS tag is set to YES then the references to source code
-# will point to the HTML generated by the htags(1) tool instead of doxygen
-# built-in source browser. The htags tool is part of GNU's global source
-# tagging system (see http://www.gnu.org/software/global/global.html). You
-# will need version 4.8.6 or higher.
-
-USE_HTAGS = NO
-
-# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
-# will generate a verbatim copy of the header file for each class for
-# which an include is specified. Set to NO to disable this.
-
-VERBATIM_HEADERS = YES
-
-#---------------------------------------------------------------------------
-# configuration options related to the alphabetical class index
-#---------------------------------------------------------------------------
-
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
-# of all compounds will be generated. Enable this if the project
-# contains a lot of classes, structs, unions or interfaces.
-
-ALPHABETICAL_INDEX = YES
-
-# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
-# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
-# in which this list will be split (can be a number in the range [1..20])
-
-COLS_IN_ALPHA_INDEX = 5
-
-# In case all classes in a project start with a common prefix, all
-# classes will be put under the same header in the alphabetical index.
-# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
-# should be ignored while generating the index headers.
-
-IGNORE_PREFIX =
-
-#---------------------------------------------------------------------------
-# configuration options related to the HTML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
-# generate HTML output.
-
-GENERATE_HTML = @enable_html_docs@
-
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `html' will be used as the default path.
-
-HTML_OUTPUT = html
-
-# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
-# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
-# doxygen will generate files with .html extension.
-
-HTML_FILE_EXTENSION = .html
-
-# The HTML_HEADER tag can be used to specify a personal HTML header for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard header. Note that when using a custom header you are responsible
-# for the proper inclusion of any scripts and style sheets that doxygen
-# needs, which is dependent on the configuration options used.
-# It is advised to generate a default header using "doxygen -w html
-# header.html footer.html stylesheet.css YourConfigFile" and then modify
-# that header. Note that the header is subject to change so you typically
-# have to redo this when upgrading to a newer version of doxygen or when
-# changing the value of configuration settings such as GENERATE_TREEVIEW!
-
-HTML_HEADER =
-
-# The HTML_FOOTER tag can be used to specify a personal HTML footer for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard footer.
-
-HTML_FOOTER =
-
-# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
-# style sheet that is used by each HTML page. It can be used to
-# fine-tune the look of the HTML output. If left blank doxygen will
-# generate a default style sheet. Note that it is recommended to use
-# HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this
-# tag will in the future become obsolete.
-
-HTML_STYLESHEET =
-
-# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional
-# user-defined cascading style sheet that is included after the standard
-# style sheets created by doxygen. Using this option one can overrule
-# certain style aspects. This is preferred over using HTML_STYLESHEET
-# since it does not replace the standard style sheet and is therefore more
-# robust against future updates. Doxygen will copy the style sheet file to
-# the output directory.
-
-HTML_EXTRA_STYLESHEET =
-
-# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
-# other source files which should be copied to the HTML output directory. Note
-# that these files will be copied to the base HTML output directory. Use the
-# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
-# files. In the HTML_STYLESHEET file, use the file name only. Also note that
-# the files will be copied as-is; there are no commands or markers available.
-
-HTML_EXTRA_FILES =
-
-# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
-# Doxygen will adjust the colors in the style sheet and background images
-# according to this color. Hue is specified as an angle on a colorwheel,
-# see http://en.wikipedia.org/wiki/Hue for more information.
-# For instance the value 0 represents red, 60 is yellow, 120 is green,
-# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
-# The allowed range is 0 to 359.
-
-HTML_COLORSTYLE_HUE = 220
-
-# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
-# the colors in the HTML output. For a value of 0 the output will use
-# grayscales only. A value of 255 will produce the most vivid colors.
-
-HTML_COLORSTYLE_SAT = 100
-
-# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
-# the luminance component of the colors in the HTML output. Values below
-# 100 gradually make the output lighter, whereas values above 100 make
-# the output darker. The value divided by 100 is the actual gamma applied,
-# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2,
-# and 100 does not change the gamma.
-
-HTML_COLORSTYLE_GAMMA = 80
-
-# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
-# page will contain the date and time when the page was generated. Setting
-# this to NO can help when comparing the output of multiple runs.
-
-HTML_TIMESTAMP = NO
-
-# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
-# documentation will contain sections that can be hidden and shown after the
-# page has loaded.
-
-HTML_DYNAMIC_SECTIONS = NO
-
-# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of
-# entries shown in the various tree structured indices initially; the user
-# can expand and collapse entries dynamically later on. Doxygen will expand
-# the tree to such a level that at most the specified number of entries are
-# visible (unless a fully collapsed tree already exceeds this amount).
-# So setting the number of entries to 1 will produce a fully collapsed tree by
-# default. 0 is a special value representing an infinite number of entries
-# and will result in a fully expanded tree by default.
-
-HTML_INDEX_NUM_ENTRIES = 100
-
-# If the GENERATE_DOCSET tag is set to YES, additional index files
-# will be generated that can be used as input for Apple's Xcode 3
-# integrated development environment, introduced with OSX 10.5 (Leopard).
-# To create a documentation set, doxygen will generate a Makefile in the
-# HTML output directory. Running make will produce the docset in that
-# directory and running "make install" will install the docset in
-# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
-# it at startup.
-# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
-# for more information.
-
-GENERATE_DOCSET = NO
-
-# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
-# feed. A documentation feed provides an umbrella under which multiple
-# documentation sets from a single provider (such as a company or product suite)
-# can be grouped.
-
-DOCSET_FEEDNAME = "Doxygen generated docs"
-
-# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
-# should uniquely identify the documentation set bundle. This should be a
-# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
-# will append .docset to the name.
-
-DOCSET_BUNDLE_ID = org.doxygen.Project
-
-# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely
-# identify the documentation publisher. This should be a reverse domain-name
-# style string, e.g. com.mycompany.MyDocSet.documentation.
-
-DOCSET_PUBLISHER_ID = org.doxygen.Publisher
-
-# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher.
-
-DOCSET_PUBLISHER_NAME = Publisher
-
-# If the GENERATE_HTMLHELP tag is set to YES, additional index files
-# will be generated that can be used as input for tools like the
-# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
-# of the generated HTML documentation.
-
-GENERATE_HTMLHELP = NO
-
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
-# be used to specify the file name of the resulting .chm file. You
-# can add a path in front of the file if the result should not be
-# written to the html output directory.
-
-CHM_FILE =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
-# be used to specify the location (absolute path including file name) of
-# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
-# the HTML help compiler on the generated index.hhp.
-
-HHC_LOCATION =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
-# controls if a separate .chi index file is generated (YES) or that
-# it should be included in the master .chm file (NO).
-
-GENERATE_CHI = NO
-
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
-# is used to encode HtmlHelp index (hhk), content (hhc) and project file
-# content.
-
-CHM_INDEX_ENCODING =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
-# controls whether a binary table of contents is generated (YES) or a
-# normal table of contents (NO) in the .chm file.
-
-BINARY_TOC = NO
-
-# The TOC_EXPAND flag can be set to YES to add extra items for group members
-# to the contents of the HTML help documentation and to the tree view.
-
-TOC_EXPAND = YES
-
-# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
-# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
-# that can be used as input for Qt's qhelpgenerator to generate a
-# Qt Compressed Help (.qch) of the generated HTML documentation.
-
-GENERATE_QHP = NO
-
-# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
-# be used to specify the file name of the resulting .qch file.
-# The path specified is relative to the HTML output folder.
-
-QCH_FILE =
-
-# The QHP_NAMESPACE tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#namespace
-
-QHP_NAMESPACE = org.doxygen.Project
-
-# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#virtual-folders
-
-QHP_VIRTUAL_FOLDER = doc
-
-# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
-# add. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#custom-filters
-
-QHP_CUST_FILTER_NAME =
-
-# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
-# custom filter to add. For more information please see
-# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
-# Qt Help Project / Custom Filters</a>.
-
-QHP_CUST_FILTER_ATTRS =
-
-# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
-# project's filter section matches.
-# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
-# Qt Help Project / Filter Attributes</a>.
-
-QHP_SECT_FILTER_ATTRS =
-
-# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
-# be used to specify the location of Qt's qhelpgenerator.
-# If non-empty doxygen will try to run qhelpgenerator on the generated
-# .qhp file.
-
-QHG_LOCATION =
-
-# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
-# will be generated, which together with the HTML files, form an Eclipse help
-# plugin. To install this plugin and make it available under the help contents
-# menu in Eclipse, the contents of the directory containing the HTML and XML
-# files needs to be copied into the plugins directory of eclipse. The name of
-# the directory within the plugins directory should be the same as
-# the ECLIPSE_DOC_ID value. After copying, Eclipse needs to be restarted before
-# the help appears.
-
-GENERATE_ECLIPSEHELP = NO
-
-# A unique identifier for the Eclipse help plugin. When installing the plugin,
-# the directory name containing the HTML and XML files should also have
-# this name.
-
-ECLIPSE_DOC_ID = org.doxygen.Project
-
-# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs)
-# at the top of each HTML page. The value NO (the default) enables the index and
-# the value YES disables it. Since the tabs have the same information as the
-# navigation tree, you can set this option to YES if you already set
-# GENERATE_TREEVIEW to YES.
-
-DISABLE_INDEX = YES
-
-# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
-# structure should be generated to display hierarchical information.
-# If the tag value is set to YES, a side panel will be generated
-# containing a tree-like index structure (just like the one that
-# is generated for HTML Help). For this to work a browser that supports
-# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
-# Windows users are probably better off using the HTML help feature.
-# Since the tree basically has the same information as the tab index, you
-# could consider setting DISABLE_INDEX to YES when enabling this option.
-
-GENERATE_TREEVIEW = YES
-
-# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
-# (range [0,1..20]) that doxygen will group on one line in the generated HTML
-# documentation. Note that a value of 0 will completely suppress the enum
-# values from appearing in the overview section.
-
-ENUM_VALUES_PER_LINE = 4
-
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
-# used to set the initial width (in pixels) of the frame in which the tree
-# is shown.
-
-TREEVIEW_WIDTH = 180
-
-# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
-# links to external symbols imported via tag files in a separate window.
-
-EXT_LINKS_IN_WINDOW = NO
-
-# Use this tag to change the font size of LaTeX formulas included
-# as images in the HTML documentation. The default is 10. Note that
-# when you change the font size after a successful doxygen run you need
-# to manually remove any form_*.png images from the HTML output directory
-# to force them to be regenerated.
-
-FORMULA_FONTSIZE = 10
-
-# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are
-# not supported properly for IE 6.0, but are supported on all modern browsers.
-# Note that when changing this option you need to delete any form_*.png files
-# in the HTML output before the changes have effect.
-
-FORMULA_TRANSPARENT = YES
-
-# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
-# (see http://www.mathjax.org) which uses client-side JavaScript for the
-# rendering instead of using prerendered bitmaps. Use this if you do not
-# have LaTeX installed or if you want the formulas to look prettier in the HTML
-# output. When enabled you may also need to install MathJax separately and
-# configure the path to it using the MATHJAX_RELPATH option.
-
-USE_MATHJAX = NO
-
-# When MathJax is enabled you can set the default output format to be used for
-# the MathJax output. Supported types are HTML-CSS, NativeMML (i.e. MathML) and
-# SVG. The default value is HTML-CSS, which is slower, but has the best
-# compatibility.
-
-MATHJAX_FORMAT = HTML-CSS
-
-# When MathJax is enabled you need to specify the location relative to the
-# HTML output directory using the MATHJAX_RELPATH option. The destination
-# directory should contain the MathJax.js script. For instance, if the mathjax
-# directory is located at the same level as the HTML output directory, then
-# MATHJAX_RELPATH should be ../mathjax. The default value points to
-# the MathJax Content Delivery Network so you can quickly see the result without
-# installing MathJax.
-# However, it is strongly recommended to install a local
-# copy of MathJax from http://www.mathjax.org before deployment.
-
-MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
-
-# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
-# extension names that should be enabled during MathJax rendering.
-
-MATHJAX_EXTENSIONS =
-
-# The MATHJAX_CODEFILE tag can be used to specify a file with JavaScript
-# code fragments that will be used on startup of the MathJax code.
-
-MATHJAX_CODEFILE =
-
-# When the SEARCHENGINE tag is enabled doxygen will generate a search box
-# for the HTML output. The underlying search engine uses JavaScript
-# and DHTML and should work on any modern browser. Note that when using
-# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
-# (GENERATE_DOCSET) there is already a search function, so this one should
-# typically be disabled. For large projects the JavaScript-based search engine
-# can be slow; in that case, enabling SERVER_BASED_SEARCH may provide a
-# better solution.
-
-SEARCHENGINE = NO
-
-# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
-# implemented using a web server instead of a web client using Javascript.
-# There are two flavours of web server based search depending on the
-# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for
-# searching and an index file used by the script. When EXTERNAL_SEARCH is
-# enabled the indexing and searching needs to be provided by external tools.
-# See the manual for details.
-
-SERVER_BASED_SEARCH = NO
-
-# When EXTERNAL_SEARCH is enabled doxygen will no longer generate the PHP
-# script for searching. Instead the search results are written to an XML file
-# which needs to be processed by an external indexer. Doxygen will invoke an
-# external search engine pointed to by the SEARCHENGINE_URL option to obtain
-# the search results. Doxygen ships with an example indexer (doxyindexer) and
-# search engine (doxysearch.cgi) which are based on the open source search
-# engine library Xapian. See the manual for configuration details.
-
-EXTERNAL_SEARCH = NO
-
-# The SEARCHENGINE_URL should point to a search engine hosted by a web server
-# which will return the search results when EXTERNAL_SEARCH is enabled.
-# Doxygen ships with an example search engine (doxysearch) which is based on
-# the open source search engine library Xapian. See the manual for configuration
-# details.
-
-SEARCHENGINE_URL =
-
-# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
-# search data is written to a file for indexing by an external tool. With the
-# SEARCHDATA_FILE tag the name of this file can be specified.
-
-SEARCHDATA_FILE = searchdata.xml
-
-# When SERVER_BASED_SEARCH AND EXTERNAL_SEARCH are both enabled the
-# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
-# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
-# projects and redirect the results back to the right project.
-
-EXTERNAL_SEARCH_ID =
-
-# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
-# projects other than the one defined by this configuration file, but that are
-# all added to the same external search index. Each project needs to have a
-# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id
-# to a relative location where the documentation can be found.
-# The format is: EXTRA_SEARCH_MAPPINGS = id1=loc1 id2=loc2 ...
-
-EXTRA_SEARCH_MAPPINGS =
-
-#---------------------------------------------------------------------------
-# configuration options related to the LaTeX output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
-# generate LaTeX output.
-
-GENERATE_LATEX = @enable_latex_docs@
-
-# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `latex' will be used as the default path.
-
-LATEX_OUTPUT = latex
-
-# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
-# invoked. If left blank `latex' will be used as the default command name.
-# Note that when enabling USE_PDFLATEX this option is only used for
-# generating bitmaps for formulas in the HTML output, but not in the
-# Makefile that is written to the output directory.
-
-LATEX_CMD_NAME = latex
-
-# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
-# generate index for LaTeX. If left blank `makeindex' will be used as the
-# default command name.
-
-MAKEINDEX_CMD_NAME = makeindex
-
-# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
-# LaTeX documents. This may be useful for small projects and may help to
-# save some trees in general.
-
-COMPACT_LATEX = NO
-
-# The PAPER_TYPE tag can be used to set the paper type that is used
-# by the printer. Possible values are: a4, letter, legal and
-# executive. If left blank a4 will be used.
-
-PAPER_TYPE = letter
-
-# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
-# packages that should be included in the LaTeX output.
-
-EXTRA_PACKAGES =
-
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
-# the generated latex document. The header should contain everything until
-# the first chapter. If it is left blank doxygen will generate a
-# standard header. Notice: only use this tag if you know what you are doing!
-
-LATEX_HEADER =
-
-# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
-# the generated latex document. The footer should contain everything after
-# the last chapter. If it is left blank doxygen will generate a
-# standard footer. Notice: only use this tag if you know what you are doing!
-
-LATEX_FOOTER =
-
-# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images
-# or other source files which should be copied to the LaTeX output directory.
-# Note that the files will be copied as-is; there are no commands or markers
-# available.
-
-LATEX_EXTRA_FILES =
-
-# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
-# is prepared for conversion to pdf (using ps2pdf). The pdf file will
-# contain links (just like the HTML output) instead of page references.
-# This makes the output suitable for online browsing using a pdf viewer.
-
-PDF_HYPERLINKS = YES
-
-# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
-# plain latex in the generated Makefile. Set this option to YES to get a
-# higher quality PDF documentation.
-
-USE_PDFLATEX = NO
-
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
-# command to the generated LaTeX files. This will instruct LaTeX to keep
-# running if errors occur, instead of asking the user for help.
-# This option is also used when generating formulas in HTML.
-
-LATEX_BATCHMODE = NO
-
-# If LATEX_HIDE_INDICES is set to YES then doxygen will not
-# include the index chapters (such as File Index, Compound Index, etc.)
-# in the output.
-
-LATEX_HIDE_INDICES = NO
-
-# If LATEX_SOURCE_CODE is set to YES then doxygen will include
-# source code with syntax highlighting in the LaTeX output.
-# Note that which sources are shown also depends on other settings
-# such as SOURCE_BROWSER.
-
-LATEX_SOURCE_CODE = NO
-
-# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
-# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See
-# http://en.wikipedia.org/wiki/BibTeX for more info.
-
-LATEX_BIB_STYLE = plain
-
-#---------------------------------------------------------------------------
-# configuration options related to the RTF output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output.
-# The RTF output is optimized for Word 97 and may not look very pretty with
-# other RTF readers or editors.
-
-GENERATE_RTF = NO
-
-# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `rtf' will be used as the default path.
-
-RTF_OUTPUT = rtf
-
-# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
-# RTF documents. This may be useful for small projects and may help to
-# save some trees in general.
-
-COMPACT_RTF = NO
-
-# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
-# will contain hyperlink fields. The RTF file will
-# contain links (just like the HTML output) instead of page references.
-# This makes the output suitable for online browsing using WORD or other
-# programs which support those fields.
-# Note: wordpad (write) and others do not support links.
-
-RTF_HYPERLINKS = NO
-
-# Load style sheet definitions from file. Syntax is similar to doxygen's
-# config file, i.e. a series of assignments. You only have to provide
-# replacements; missing definitions are set to their default value.
-
-RTF_STYLESHEET_FILE =
-
-# Set optional variables used in the generation of an rtf document.
-# Syntax is similar to doxygen's config file.
-
-RTF_EXTENSIONS_FILE =
-
-#---------------------------------------------------------------------------
-# configuration options related to the man page output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_MAN tag is set to YES Doxygen will
-# generate man pages.
-
-GENERATE_MAN = NO
-
-# The MAN_OUTPUT tag is used to specify where the man pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `man' will be used as the default path.
-
-MAN_OUTPUT = man
-
-# The MAN_EXTENSION tag determines the extension that is added to
-# the generated man pages (default is the subroutine's section .3)
-
-MAN_EXTENSION = .3
-
-# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
-# then it will generate one additional man file for each entity
-# documented in the real man page(s). These additional files
-# only source the real man page, but without them the man command
-# would be unable to find the correct page. The default is NO.
-
-MAN_LINKS = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the XML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_XML tag is set to YES Doxygen will
-# generate an XML file that captures the structure of
-# the code including all documentation.
-
-GENERATE_XML = @enable_xml_docs@
-
-# The XML_OUTPUT tag is used to specify where the XML pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `xml' will be used as the default path.
-
-XML_OUTPUT = xml
-
-# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
-# dump the program listings (including syntax highlighting
-# and cross-referencing information) to the XML output. Note that
-# enabling this will significantly increase the size of the XML output.
-
-XML_PROGRAMLISTING = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the DOCBOOK output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_DOCBOOK tag is set to YES Doxygen will generate DOCBOOK files
-# that can be used to generate PDF.
-
-GENERATE_DOCBOOK = NO
-
-# The DOCBOOK_OUTPUT tag is used to specify where the DOCBOOK pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
-# front of it. If left blank docbook will be used as the default path.
-
-DOCBOOK_OUTPUT = docbook
-
-#---------------------------------------------------------------------------
-# configuration options for the AutoGen Definitions output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
-# generate an AutoGen Definitions (see autogen.sf.net) file
-# that captures the structure of the code including all
-# documentation. Note that this feature is still experimental
-# and incomplete at the moment.
-
-GENERATE_AUTOGEN_DEF = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the Perl module output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_PERLMOD tag is set to YES Doxygen will
-# generate a Perl module file that captures the structure of
-# the code including all documentation. Note that this
-# feature is still experimental and incomplete at the
-# moment.
-
-GENERATE_PERLMOD = NO
-
-# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
-# the necessary Makefile rules, Perl scripts and LaTeX code to be able
-# to generate PDF and DVI output from the Perl module output.
-
-PERLMOD_LATEX = NO
-
-# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
-# nicely formatted so it can be parsed by a human reader. This is useful
-# if you want to understand what is going on. On the other hand, if this
-# tag is set to NO the size of the Perl module output will be much smaller
-# and Perl will parse it just the same.
-
-PERLMOD_PRETTY = YES
-
-# The names of the make variables in the generated doxyrules.make file
-# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
-# This is useful so different doxyrules.make files included by the same
-# Makefile don't overwrite each other's variables.
-
-PERLMOD_MAKEVAR_PREFIX =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the preprocessor
-#---------------------------------------------------------------------------
-
-# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
-# evaluate all C-preprocessor directives found in the sources and include
-# files.
-
-ENABLE_PREPROCESSING = YES
-
-# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
-# names in the source code. If set to NO (the default) only conditional
-# compilation will be performed. Macro expansion can be done in a controlled
-# way by setting EXPAND_ONLY_PREDEF to YES.
-
-MACRO_EXPANSION = NO
-
-# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
-# then the macro expansion is limited to the macros specified with the
-# PREDEFINED and EXPAND_AS_DEFINED tags.
-
-EXPAND_ONLY_PREDEF = NO
-
-# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
-# pointed to by INCLUDE_PATH will be searched when a #include is found.
-
-SEARCH_INCLUDES = YES
-
-# The INCLUDE_PATH tag can be used to specify one or more directories that
-# contain include files that are not input files but should be processed by
-# the preprocessor.
-
-INCLUDE_PATH =
-
-# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
-# patterns (like *.h and *.hpp) to filter out the header-files in the
-# directories. If left blank, the patterns specified with FILE_PATTERNS will
-# be used.
-
-INCLUDE_FILE_PATTERNS =
-
-# The PREDEFINED tag can be used to specify one or more macro names that
-# are defined before the preprocessor is started (similar to the -D option of
-# gcc). The argument of the tag is a list of macros of the form: name
-# or name=definition (no spaces). If the definition and the = are
-# omitted =1 is assumed. To prevent a macro definition from being
-# undefined via #undef or recursively expanded use the := operator
-# instead of the = operator.
-
-PREDEFINED =
-
-# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
-# this tag can be used to specify a list of macro names that should be expanded.
-# The macro definition that is found in the sources will be used.
-# Use the PREDEFINED tag if you want to use a different macro definition that
-# overrules the definition found in the source code.
-
-EXPAND_AS_DEFINED =
-
-# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
-# doxygen's preprocessor will remove all references to function-like macros
-# that are alone on a line, have an all uppercase name, and do not end with a
-# semicolon, because these will confuse the parser if not removed.
-
-SKIP_FUNCTION_MACROS = YES
-
-#---------------------------------------------------------------------------
-# Configuration::additions related to external references
-#---------------------------------------------------------------------------
-
-# The TAGFILES option can be used to specify one or more tagfiles. For each
-# tag file the location of the external documentation should be added. The
-# format of a tag file without this location is as follows:
-#
-# TAGFILES = file1 file2 ...
-# Adding a location for the tag files is done as follows:
-#
-# TAGFILES = file1=loc1 "file2 = loc2" ...
-# where "loc1" and "loc2" can be relative or absolute paths
-# or URLs. Note that each tag file must have a unique name (where the name does
-# NOT include the path). If a tag file is not located in the directory in which
-# doxygen is run, you must also specify the path to the tagfile here.
-
-TAGFILES =
-
-# When a file name is specified after GENERATE_TAGFILE, doxygen will create
-# a tag file that is based on the input files it reads.
-
-GENERATE_TAGFILE =
-
-# If the ALLEXTERNALS tag is set to YES all external classes will be listed
-# in the class index. If set to NO only the inherited external classes
-# will be listed.
-
-ALLEXTERNALS = NO
-
-# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
-# in the modules index. If set to NO, only the current project's groups will
-# be listed.
-
-EXTERNAL_GROUPS = YES
-
-# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed
-# in the related pages index. If set to NO, only the current project's
-# pages will be listed.
-
-EXTERNAL_PAGES = YES
-
-#---------------------------------------------------------------------------
-# Configuration options related to the dot tool
-#---------------------------------------------------------------------------
-
-# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
-# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
-# or super classes. Setting the tag to NO turns the diagrams off. Note that
-# this option also works with HAVE_DOT disabled, but it is recommended to
-# install and use dot, since it yields more powerful graphs.
-
-CLASS_DIAGRAMS = YES
-
-# If set to YES, the inheritance and collaboration graphs will hide
-# inheritance and usage relations if the target is undocumented
-# or is not a class.
-
-HIDE_UNDOC_RELATIONS = YES
-
-# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
-# available from the path. This tool is part of Graphviz, a graph visualization
-# toolkit from AT&T and Lucent Bell Labs. The other options in this section
-# have no effect if this option is set to NO (the default).
-
-HAVE_DOT = @HAVE_DOT@
-
-# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
-# allowed to run in parallel. When set to 0 (the default) doxygen will
-# base this on the number of processors available in the system. You can set it
-# explicitly to a value larger than 0 to get control over the balance
-# between CPU load and processing speed.
-
-DOT_NUM_THREADS = 0
-
-# By default doxygen will use the Helvetica font for all dot files that
-# doxygen generates. If you want a different-looking font, you can specify
-# the font name using DOT_FONTNAME. You need to make sure dot is able to find
-# the font, which can be done by putting it in a standard location or by setting
-# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
-# directory containing the font.
-
-DOT_FONTNAME = Helvetica
-
-# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
-# The default size is 10pt.
-
-DOT_FONTSIZE = 10
-
-# By default doxygen will tell dot to use the Helvetica font.
-# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to
-# set the path where dot can find it.
-
-DOT_FONTPATH =
-
-# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect inheritance relations. Setting this tag to YES will force the
-# CLASS_DIAGRAMS tag to NO.
-
-CLASS_GRAPH = YES
-
-# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect implementation dependencies (inheritance, containment, and
-# class references variables) of the class with other documented classes.
-
-COLLABORATION_GRAPH = NO
-
-# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for groups, showing the direct groups dependencies
-
-GROUP_GRAPHS = YES
-
-# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
-# collaboration diagrams in a style similar to the OMG's Unified Modeling
-# Language.
-
-UML_LOOK = NO
-
-# If the UML_LOOK tag is enabled, the fields and methods are shown inside
-# the class node. If there are many fields or methods and many nodes the
-# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
-# threshold limits the number of items for each type to make the size more
-# manageable. Set this to 0 for no limit. Note that the threshold may be
-# exceeded by 50% before the limit is enforced.
-
-UML_LIMIT_NUM_FIELDS = 10
-
-# If set to YES, the inheritance and collaboration graphs will show the
-# relations between templates and their instances.
-
-TEMPLATE_RELATIONS = NO
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
-# tags are set to YES then doxygen will generate a graph for each documented
-# file showing the direct and indirect include dependencies of the file with
-# other documented files.
-
-INCLUDE_GRAPH = YES
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
-# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
-# documented header file showing the documented files that directly or
-# indirectly include this file.
-
-INCLUDED_BY_GRAPH = YES
-
-# If the CALL_GRAPH and HAVE_DOT options are set to YES then
-# doxygen will generate a call dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable call graphs
-# for selected functions only using the \callgraph command.
-
-CALL_GRAPH = NO
-
-# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
-# doxygen will generate a caller dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable caller
-# graphs for selected functions only using the \callergraph command.
-
-CALLER_GRAPH = NO
-
-# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
-# will generate a graphical hierarchy of all classes instead of a textual one.
-
-GRAPHICAL_HIERARCHY = YES
-
-# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES
-# then doxygen will show the dependencies a directory has on other directories
-# in a graphical way. The dependency relations are determined by the #include
-# relations between the files in the directories.
-
-DIRECTORY_GRAPH = YES
-
-# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
-# generated by dot. Possible values are svg, png, jpg, or gif.
-# If left blank png will be used. If you choose svg you need to set
-# HTML_FILE_EXTENSION to xhtml in order to make the SVG files
-# visible in IE 9+ (other browsers do not have this requirement).
-
-DOT_IMAGE_FORMAT = png
-
-# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
-# enable generation of interactive SVG images that allow zooming and panning.
-# Note that this requires a modern browser other than Internet Explorer.
-# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you
-# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files
-# visible. Older versions of IE do not have SVG support.
-
-INTERACTIVE_SVG = NO
-
-# The tag DOT_PATH can be used to specify the path where the dot tool can be
-# found. If left blank, it is assumed the dot tool can be found in the path.
-
-DOT_PATH =
-
-# The DOTFILE_DIRS tag can be used to specify one or more directories that
-# contain dot files that are included in the documentation (see the
-# \dotfile command).
-
-DOTFILE_DIRS =
-
-# The MSCFILE_DIRS tag can be used to specify one or more directories that
-# contain msc files that are included in the documentation (see the
-# \mscfile command).
-
-MSCFILE_DIRS =
-
-# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
-# nodes that will be shown in the graph. If the number of nodes in a graph
-# becomes larger than this value, doxygen will truncate the graph, which is
-# visualized by representing a node as a red box. Note that if the
-# number of direct children of the root node in a graph is already larger than
-# DOT_GRAPH_MAX_NODES, then doxygen will not show the graph at all. Also note
-# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
-
-DOT_GRAPH_MAX_NODES = 50
-
-# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
-# graphs generated by dot. A depth value of 3 means that only nodes reachable
-# from the root by following a path via at most 3 edges will be shown. Nodes
-# that lay further from the root node will be omitted. Note that setting this
-# option to 1 or 2 may greatly reduce the computation time needed for large
-# code bases. Also note that the size of a graph can be further restricted by
-# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
-
-MAX_DOT_GRAPH_DEPTH = 0
-
-# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
-# background. This is disabled by default, because dot on Windows does not
-# seem to support this out of the box. Warning: Depending on the platform used,
-# enabling this option may lead to badly anti-aliased labels on the edges of
-# a graph (i.e. they become hard to read).
-
-DOT_TRANSPARENT = NO
-
-# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
-# files in one run (i.e. multiple -o and -T options on the command line). This
-# makes dot run faster, but since only newer versions of dot (>1.8.10)
-# support this, this feature is disabled by default.
-
-DOT_MULTI_TARGETS = YES
-
-# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
-# generate a legend page explaining the meaning of the various boxes and
-# arrows in the dot generated graphs.
-
-GENERATE_LEGEND = YES
-
-# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
-# remove the intermediate dot files that are used to generate
-# the various graphs.
-
-DOT_CLEANUP = YES
diff --git a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/Doxyfile.swig_doc.in b/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/Doxyfile.swig_doc.in
deleted file mode 100644
index 131648b2ab..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/Doxyfile.swig_doc.in
+++ /dev/null
@@ -1,1864 +0,0 @@
-# Doxyfile 1.8.4
-
-# This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project.
-#
-# All text after a double hash (##) is considered a comment and is placed
-# in front of the TAG it is preceding.
-# All text after a hash (#) is considered a comment and will be ignored.
-# The format is:
-# TAG = value [value, ...]
-# For lists, items can also be appended using:
-# TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (" ").
-
-#---------------------------------------------------------------------------
-# Project related configuration options
-#---------------------------------------------------------------------------
-
-# This tag specifies the encoding used for all characters in the config file
-# that follow. The default is UTF-8 which is also the encoding used for all
-# text before the first occurrence of this tag. Doxygen uses libiconv (or the
-# iconv built into libc) for the transcoding. See
-# http://www.gnu.org/software/libiconv for the list of possible encodings.
-
-DOXYFILE_ENCODING = UTF-8
-
-# The PROJECT_NAME tag is a single word (or sequence of words) that should
-# identify the project. Note that if you do not use Doxywizard you need
-# to put quotes around the project name if it contains spaces.
-
-PROJECT_NAME = @CPACK_PACKAGE_NAME@
-
-# The PROJECT_NUMBER tag can be used to enter a project or revision number.
-# This could be handy for archiving the generated documentation or
-# if some version control system is used.
-
-PROJECT_NUMBER = @CPACK_PACKAGE_VERSION@
-
-# Using the PROJECT_BRIEF tag one can provide an optional one line description
-# for a project that appears at the top of each page and should give the viewer
-# a quick idea about the purpose of the project. Keep the description short.
-
-PROJECT_BRIEF =
-
-# With the PROJECT_LOGO tag one can specify a logo or icon that is
-# included in the documentation. The maximum height of the logo should not
-# exceed 55 pixels and the maximum width should not exceed 200 pixels.
-# Doxygen will copy the logo to the output directory.
-
-PROJECT_LOGO =
-
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
-# base path where the generated documentation will be put.
-# If a relative path is entered, it will be relative to the location
-# where doxygen was started. If left blank the current directory will be used.
-
-OUTPUT_DIRECTORY = "@OUTPUT_DIRECTORY@"
-
-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
-# 4096 sub-directories (in 2 levels) under the output directory of each output
-# format and will distribute the generated files over these directories.
-# Enabling this option can be useful when feeding doxygen a huge amount of
-# source files, where putting all generated files in the same directory would
-# otherwise cause performance problems for the file system.
-
-CREATE_SUBDIRS = NO
-
-# The OUTPUT_LANGUAGE tag is used to specify the language in which all
-# documentation generated by doxygen is written. Doxygen will use this
-# information to generate all constant output in the proper language.
-# The default language is English, other supported languages are:
-# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
-# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
-# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
-# messages), Korean, Korean-en, Latvian, Lithuanian, Norwegian, Macedonian,
-# Persian, Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic,
-# Slovak, Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
-
-OUTPUT_LANGUAGE = English
-
-# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
-# include brief member descriptions after the members that are listed in
-# the file and class documentation (similar to JavaDoc).
-# Set to NO to disable this.
-
-BRIEF_MEMBER_DESC = YES
-
-# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
-# the brief description of a member or function before the detailed description.
-# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
-# brief descriptions will be completely suppressed.
-
-REPEAT_BRIEF = YES
-
-# This tag implements a quasi-intelligent brief description abbreviator
-# that is used to form the text in various listings. Each string
-# in this list, if found as the leading text of the brief description, will be
-# stripped from the text and the result after processing the whole list, is
-# used as the annotated text. Otherwise, the brief description is used as-is.
-# If left blank, the following values are used ("$name" is automatically
-# replaced with the name of the entity): "The $name class" "The $name widget"
-# "The $name file" "is" "provides" "specifies" "contains"
-# "represents" "a" "an" "the"
-
-ABBREVIATE_BRIEF =
-
-# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
-# Doxygen will generate a detailed section even if there is only a brief
-# description.
-
-ALWAYS_DETAILED_SEC = NO
-
-# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
-# inherited members of a class in the documentation of that class as if those
-# members were ordinary class members. Constructors, destructors and assignment
-# operators of the base classes will not be shown.
-
-INLINE_INHERITED_MEMB = NO
-
-# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
-# path before each file name in the file list and in the header files. If set
-# to NO the shortest path that makes the file name unique will be used.
-
-FULL_PATH_NAMES = NO
-
-# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
-# can be used to strip a user-defined part of the path. Stripping is
-# only done if one of the specified strings matches the left-hand part of
-# the path. The tag can be used to show relative paths in the file list.
-# If left blank the directory from which doxygen is run is used as the
-# path to strip. Note that you can specify absolute paths here, but also
-# relative paths, which will be relative to the directory where doxygen is
-# started.
-
-STRIP_FROM_PATH =
-
-# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
-# the path mentioned in the documentation of a class, which tells
-# the reader which header file to include in order to use a class.
-# If left blank only the name of the header file containing the class
-# definition is used. Otherwise one should specify the include paths that
-# are normally passed to the compiler using the -I flag.
-
-STRIP_FROM_INC_PATH =
-
-# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
-# (but less readable) file names. This can be useful if your file system
-# doesn't support long names like on DOS, Mac, or CD-ROM.
-
-SHORT_NAMES = NO
-
-# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
-# will interpret the first line (until the first dot) of a JavaDoc-style
-# comment as the brief description. If set to NO, the JavaDoc
-# comments will behave just like regular Qt-style comments
-# (thus requiring an explicit @brief command for a brief description.)
-
-JAVADOC_AUTOBRIEF = NO
-
-# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
-# interpret the first line (until the first dot) of a Qt-style
-# comment as the brief description. If set to NO, the comments
-# will behave just like regular Qt-style comments (thus requiring
-# an explicit \brief command for a brief description.)
-
-QT_AUTOBRIEF = NO
-
-# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
-# treat a multi-line C++ special comment block (i.e. a block of //! or ///
-# comments) as a brief description. This used to be the default behaviour.
-# The new default is to treat a multi-line C++ comment block as a detailed
-# description. Set this tag to YES if you prefer the old behaviour instead.
-
-MULTILINE_CPP_IS_BRIEF = NO
-
-# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
-# member inherits the documentation from any documented member that it
-# re-implements.
-
-INHERIT_DOCS = YES
-
-# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
-# a new page for each member. If set to NO, the documentation of a member will
-# be part of the file/class/namespace that contains it.
-
-SEPARATE_MEMBER_PAGES = NO
-
-# The TAB_SIZE tag can be used to set the number of spaces in a tab.
-# Doxygen uses this value to replace tabs by spaces in code fragments.
-
-TAB_SIZE = 8
-
-# This tag can be used to specify a number of aliases that acts
-# as commands in the documentation. An alias has the form "name=value".
-# For example adding "sideeffect=\par Side Effects:\n" will allow you to
-# put the command \sideeffect (or @sideeffect) in the documentation, which
-# will result in a user-defined paragraph with heading "Side Effects:".
-# You can put \n's in the value part of an alias to insert newlines.
-
-ALIASES =
-
-# This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding
-# "class=itcl::class" will allow you to use the command class in the
-# itcl::class meaning.
-
-TCL_SUBST =
-
-# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
-# sources only. Doxygen will then generate output that is more tailored for C.
-# For instance, some of the names that are used will be different. The list
-# of all members will be omitted, etc.
-
-OPTIMIZE_OUTPUT_FOR_C = NO
-
-# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
-# sources only. Doxygen will then generate output that is more tailored for
-# Java. For instance, namespaces will be presented as packages, qualified
-# scopes will look different, etc.
-
-OPTIMIZE_OUTPUT_JAVA = NO
-
-# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
-# sources only. Doxygen will then generate output that is more tailored for
-# Fortran.
-
-OPTIMIZE_FOR_FORTRAN = NO
-
-# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
-# sources. Doxygen will then generate output that is tailored for
-# VHDL.
-
-OPTIMIZE_OUTPUT_VHDL = NO
-
-# Doxygen selects the parser to use depending on the extension of the files it
-# parses. With this tag you can assign which parser to use for a given
-# extension. Doxygen has a built-in mapping, but you can override or extend it
-# using this tag. The format is ext=language, where ext is a file extension,
-# and language is one of the parsers supported by doxygen: IDL, Java,
-# Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, and VHDL.
-# For instance to make doxygen treat .inc files as Fortran files (default
-# is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note
-# that for custom extensions you also need to set FILE_PATTERNS otherwise the
-# files are not read by doxygen.
-
-EXTENSION_MAPPING =
-
-# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all
-# comments according to the Markdown format, which allows for more readable
-# documentation. See http://daringfireball.net/projects/markdown/ for details.
-# The output of markdown processing is further processed by doxygen, so you
-# can mix doxygen, HTML, and XML commands with Markdown formatting.
-# Disable only in case of backward compatibility issues.
-
-MARKDOWN_SUPPORT = YES
-
-# When enabled doxygen tries to link words that correspond to documented
-# classes, or namespaces to their corresponding documentation. Such a link can
-# be prevented in individual cases by putting a % sign in front of the word
-# or globally by setting AUTOLINK_SUPPORT to NO.
-
-AUTOLINK_SUPPORT = YES
-
-# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
-# to include (a tag file for) the STL sources as input, then you should
-# set this tag to YES in order to let doxygen match functions declarations and
-# definitions whose arguments contain STL classes (e.g. func(std::string); vs.
-# func(std::string) {}). This also makes the inheritance and collaboration
-# diagrams that involve STL classes more complete and accurate.
-
-BUILTIN_STL_SUPPORT = YES
-
-# If you use Microsoft's C++/CLI language, you should set this option to YES to
-# enable parsing support.
-
-CPP_CLI_SUPPORT = NO
-
-# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
-# Doxygen will parse them like normal C++ but will assume all classes use public
-# instead of private inheritance when no explicit protection keyword is present.
-
-SIP_SUPPORT = NO
-
-# For Microsoft's IDL there are propget and propput attributes to indicate
-# getter and setter methods for a property. Setting this option to YES (the
-# default) will make doxygen replace the get and set methods by a property in
-# the documentation. This will only work if the methods are indeed getting or
-# setting a simple type. If this is not the case, or you want to show the
-# methods anyway, you should set this option to NO.
-
-IDL_PROPERTY_SUPPORT = YES
-
-# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES, then doxygen will reuse the documentation of the first
-# member in the group (if any) for the other members of the group. By default
-# all members of a group must be documented explicitly.
-
-DISTRIBUTE_GROUP_DOC = NO
-
-# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
-# the same type (for instance a group of public functions) to be put as a
-# subgroup of that type (e.g. under the Public Functions section). Set it to
-# NO to prevent subgrouping. Alternatively, this can be done per class using
-# the \nosubgrouping command.
-
-SUBGROUPING = YES
-
-# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and
-# unions are shown inside the group in which they are included (e.g. using
-# @ingroup) instead of on a separate page (for HTML and Man pages) or
-# section (for LaTeX and RTF).
-
-INLINE_GROUPED_CLASSES = NO
-
-# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and
-# unions with only public data fields or simple typedef fields will be shown
-# inline in the documentation of the scope in which they are defined (i.e. file,
-# namespace, or group documentation), provided this scope is documented. If set
-# to NO (the default), structs, classes, and unions are shown on a separate
-# page (for HTML and Man pages) or section (for LaTeX and RTF).
-
-INLINE_SIMPLE_STRUCTS = NO
-
-# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
-# is documented as struct, union, or enum with the name of the typedef. So
-# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
-# with name TypeT. When disabled the typedef will appear as a member of a file,
-# namespace, or class. And the struct will be named TypeS. This can typically
-# be useful for C code in case the coding convention dictates that all compound
-# types are typedef'ed and only the typedef is referenced, never the tag name.
-
-TYPEDEF_HIDES_STRUCT = NO
-
-# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
-# cache is used to resolve symbols given their name and scope. Since this can
-# be an expensive process and often the same symbol appears multiple times in
-# the code, doxygen keeps a cache of pre-resolved symbols. If the cache is too
-# small doxygen will become slower. If the cache is too large, memory is wasted.
-# The cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid
-# range is 0..9, the default is 0, corresponding to a cache size of 2^16 = 65536
-# symbols.
-
-LOOKUP_CACHE_SIZE = 0
-
-#---------------------------------------------------------------------------
-# Build related configuration options
-#---------------------------------------------------------------------------
-
-# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
-# documentation are documented, even if no documentation was available.
-# Private class members and static file members will be hidden unless
-# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES, respectively.
-
-EXTRACT_ALL = YES
-
-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
-# will be included in the documentation.
-
-EXTRACT_PRIVATE = NO
-
-# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
-# scope will be included in the documentation.
-
-EXTRACT_PACKAGE = NO
-
-# If the EXTRACT_STATIC tag is set to YES all static members of a file
-# will be included in the documentation.
-
-EXTRACT_STATIC = NO
-
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
-# defined locally in source files will be included in the documentation.
-# If set to NO only classes defined in header files are included.
-
-EXTRACT_LOCAL_CLASSES = YES
-
-# This flag is only useful for Objective-C code. When set to YES local
-# methods, which are defined in the implementation section but not in
-# the interface, are included in the documentation.
-# If set to NO (the default) only methods in the interface are included.
-
-EXTRACT_LOCAL_METHODS = NO
-
-# If this flag is set to YES, the members of anonymous namespaces will be
-# extracted and appear in the documentation as a namespace called
-# 'anonymous_namespace{file}', where file will be replaced with the base
-# name of the file that contains the anonymous namespace. By default
-# anonymous namespaces are hidden.
-
-EXTRACT_ANON_NSPACES = NO
-
-# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
-# undocumented members of documented classes, files or namespaces.
-# If set to NO (the default) these members will be included in the
-# various overviews, but no documentation section is generated.
-# This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_MEMBERS = NO
-
-# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
-# undocumented classes that are normally visible in the class hierarchy.
-# If set to NO (the default) these classes will be included in the various
-# overviews. This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_CLASSES = NO
-
-# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
-# friend (class|struct|union) declarations.
-# If set to NO (the default) these declarations will be included in the
-# documentation.
-
-HIDE_FRIEND_COMPOUNDS = NO
-
-# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
-# documentation blocks found inside the body of a function.
-# If set to NO (the default) these blocks will be appended to the
-# function's detailed documentation block.
-
-HIDE_IN_BODY_DOCS = NO
-
-# The INTERNAL_DOCS tag determines if documentation
-# that is typed after a \internal command is included. If the tag is set
-# to NO (the default) then the documentation will be excluded.
-# Set it to YES to include the internal documentation.
-
-INTERNAL_DOCS = NO
-
-# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
-# file names in lower-case letters. If set to YES upper-case letters are also
-# allowed. This is useful if you have classes or files whose names only differ
-# in case and if your file system supports case sensitive file names. Windows
-# and Mac users are advised to set this option to NO.
-
-CASE_SENSE_NAMES = YES
-
-# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
-# will show members with their full class and namespace scopes in the
-# documentation. If set to YES the scope will be hidden.
-
-HIDE_SCOPE_NAMES = NO
-
-# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
-# will put a list of the files that are included by a file in the documentation
-# of that file.
-
-SHOW_INCLUDE_FILES = YES
-
-# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
-# will list include files with double quotes in the documentation
-# rather than with sharp brackets.
-
-FORCE_LOCAL_INCLUDES = NO
-
-# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
-# is inserted in the documentation for inline members.
-
-INLINE_INFO = YES
-
-# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
-# will sort the (detailed) documentation of file and class members
-# alphabetically by member name. If set to NO the members will appear in
-# declaration order.
-
-SORT_MEMBER_DOCS = YES
-
-# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
-# brief documentation of file, namespace and class members alphabetically
-# by member name. If set to NO (the default) the members will appear in
-# declaration order.
-
-SORT_BRIEF_DOCS = NO
-
-# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
-# will sort the (brief and detailed) documentation of class members so that
-# constructors and destructors are listed first. If set to NO (the default)
-# the constructors will appear in the respective orders defined by
-# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
-# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
-# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
-
-SORT_MEMBERS_CTORS_1ST = NO
-
-# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
-# hierarchy of group names into alphabetical order. If set to NO (the default)
-# the group names will appear in their defined order.
-
-SORT_GROUP_NAMES = NO
-
-# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
-# sorted by fully-qualified names, including namespaces. If set to
-# NO (the default), the class list will be sorted only by class name,
-# not including the namespace part.
-# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
-# Note: This option applies only to the class list, not to the
-# alphabetical list.
-
-SORT_BY_SCOPE_NAME = NO
-
-# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to
-# do proper type resolution of all parameters of a function it will reject a
-# match between the prototype and the implementation of a member function even
-# if there is only one candidate or it is obvious which candidate to choose
-# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
-# will still accept a match between prototype and implementation in such cases.
-
-STRICT_PROTO_MATCHING = NO
-
-# The GENERATE_TODOLIST tag can be used to enable (YES) or
-# disable (NO) the todo list. This list is created by putting \todo
-# commands in the documentation.
-
-GENERATE_TODOLIST = YES
-
-# The GENERATE_TESTLIST tag can be used to enable (YES) or
-# disable (NO) the test list. This list is created by putting \test
-# commands in the documentation.
-
-GENERATE_TESTLIST = YES
-
-# The GENERATE_BUGLIST tag can be used to enable (YES) or
-# disable (NO) the bug list. This list is created by putting \bug
-# commands in the documentation.
-
-GENERATE_BUGLIST = YES
-
-# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
-# disable (NO) the deprecated list. This list is created by putting
-# \deprecated commands in the documentation.
-
-GENERATE_DEPRECATEDLIST= YES
-
-# The ENABLED_SECTIONS tag can be used to enable conditional
-# documentation sections, marked by \if section-label ... \endif
-# and \cond section-label ... \endcond blocks.
-
-ENABLED_SECTIONS =
-
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
-# the initial value of a variable or macro consists of for it to appear in
-# the documentation. If the initializer consists of more lines than specified
-# here it will be hidden. Use a value of 0 to hide initializers completely.
-# The appearance of the initializer of individual variables and macros in the
-# documentation can be controlled using \showinitializer or \hideinitializer
-# command in the documentation regardless of this setting.
-
-MAX_INITIALIZER_LINES = 30
-
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
-# at the bottom of the documentation of classes and structs. If set to YES the
-# list will mention the files that were used to generate the documentation.
-
-SHOW_USED_FILES = YES
-
-# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
-# This will remove the Files entry from the Quick Index and from the
-# Folder Tree View (if specified). The default is YES.
-
-SHOW_FILES = YES
-
-# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
-# Namespaces page.
-# This will remove the Namespaces entry from the Quick Index
-# and from the Folder Tree View (if specified). The default is YES.
-
-SHOW_NAMESPACES = YES
-
-# The FILE_VERSION_FILTER tag can be used to specify a program or script that
-# doxygen should invoke to get the current version for each file (typically from
-# the version control system). Doxygen will invoke the program by executing (via
-# popen()) the command <command> <input-file>, where <command> is the value of
-# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
-# provided by doxygen. Whatever the program writes to standard output
-# is used as the file version. See the manual for examples.
-
-FILE_VERSION_FILTER =
-
-# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
-# by doxygen. The layout file controls the global structure of the generated
-# output files in an output format independent way. To create the layout file
-# that represents doxygen's defaults, run doxygen with the -l option.
-# You can optionally specify a file name after the option, if omitted
-# DoxygenLayout.xml will be used as the name of the layout file.
-
-LAYOUT_FILE =
-
-# The CITE_BIB_FILES tag can be used to specify one or more bib files
-# containing the references data. This must be a list of .bib files. The
-# .bib extension is automatically appended if omitted. Using this command
-# requires the bibtex tool to be installed. See also
-# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style
-# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this
-# feature you need bibtex and perl available in the search path. Do not use
-# file names with spaces, as bibtex cannot handle them.
-
-CITE_BIB_FILES =
-
-#---------------------------------------------------------------------------
-# configuration options related to warning and progress messages
-#---------------------------------------------------------------------------
-
-# The QUIET tag can be used to turn on/off the messages that are generated
-# by doxygen. Possible values are YES and NO. If left blank NO is used.
-
-QUIET = YES
-
-# The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated by doxygen. Possible values are YES and NO. If left blank
-# NO is used.
-
-WARNINGS = YES
-
-# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
-# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
-# automatically be disabled.
-
-WARN_IF_UNDOCUMENTED = YES
-
-# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
-# potential errors in the documentation, such as not documenting some
-# parameters in a documented function, or documenting parameters that
-# don't exist or using markup commands wrongly.
-
-WARN_IF_DOC_ERROR = YES
-
-# The WARN_NO_PARAMDOC option can be enabled to get warnings for
-# functions that are documented, but have no documentation for their parameters
-# or return value. If set to NO (the default) doxygen will only warn about
-# wrong or incomplete parameter documentation, but not about the absence of
-# documentation.
-
-WARN_NO_PARAMDOC = NO
-
-# The WARN_FORMAT tag determines the format of the warning messages that
-# doxygen can produce. The string should contain the $file, $line, and $text
-# tags, which will be replaced by the file and line number from which the
-# warning originated and the warning text. Optionally the format may contain
-# $version, which will be replaced by the version of the file (if it could
-# be obtained via FILE_VERSION_FILTER)
-
-WARN_FORMAT = "$file:$line: $text"
-
-# The WARN_LOGFILE tag can be used to specify a file to which warning
-# and error messages should be written. If left blank the output is written
-# to stderr.
-
-WARN_LOGFILE =
-
-#---------------------------------------------------------------------------
-# configuration options related to the input files
-#---------------------------------------------------------------------------
-
-# The INPUT tag can be used to specify the files and/or directories that contain
-# documented source files. You may enter file names like "myfile.cpp" or
-# directories like "/usr/src/myproject". Separate the files or directories
-# with spaces.
-
-INPUT = @INPUT_PATHS@
-
-# This tag can be used to specify the character encoding of the source files
-# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
-# also the default input encoding. Doxygen uses libiconv (or the iconv built
-# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
-# the list of possible encodings.
-
-INPUT_ENCODING = UTF-8
-
-# If the value of the INPUT tag contains directories, you can use the
-# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank the following patterns are tested:
-# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
-# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
-# *.f90 *.f *.for *.vhd *.vhdl
-
-FILE_PATTERNS = *.h
-
-# The RECURSIVE tag can be used to specify whether or not subdirectories
-# should be searched for input files as well. Possible values are YES and NO.
-# If left blank NO is used.
-
-RECURSIVE = YES
-
-# The EXCLUDE tag can be used to specify files and/or directories that should be
-# excluded from the INPUT source files. This way you can easily exclude a
-# subdirectory from a directory tree whose root is specified with the INPUT tag.
-# Note that relative paths are relative to the directory from which doxygen is
-# run.
-
-EXCLUDE =
-
-# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
-# directories that are symbolic links (a Unix file system feature) are excluded
-# from the input.
-
-EXCLUDE_SYMLINKS = NO
-
-# If the value of the INPUT tag contains directories, you can use the
-# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
-# certain files from those directories. Note that the wildcards are matched
-# against the file with absolute path, so to exclude all test directories
-# for example use the pattern */test/*
-
-EXCLUDE_PATTERNS =
-
-# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
-# (namespaces, classes, functions, etc.) that should be excluded from the
-# output. The symbol name can be a fully qualified name, a word, or if the
-# wildcard * is used, a substring. Examples: ANamespace, AClass,
-# AClass::ANamespace, ANamespace::*Test
-
-EXCLUDE_SYMBOLS =
-
-# The EXAMPLE_PATH tag can be used to specify one or more files or
-# directories that contain example code fragments that are included (see
-# the \include command).
-
-EXAMPLE_PATH =
-
-# If the value of the EXAMPLE_PATH tag contains directories, you can use the
-# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank all files are included.
-
-EXAMPLE_PATTERNS =
-
-# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
-# searched for input files to be used with the \include or \dontinclude
-# commands irrespective of the value of the RECURSIVE tag.
-# Possible values are YES and NO. If left blank NO is used.
-
-EXAMPLE_RECURSIVE = NO
-
-# The IMAGE_PATH tag can be used to specify one or more files or
-# directories that contain images that are included in the documentation (see
-# the \image command).
-
-IMAGE_PATH =
-
-# The INPUT_FILTER tag can be used to specify a program that doxygen should
-# invoke to filter for each input file. Doxygen will invoke the filter program
-# by executing (via popen()) the command <filter> <input-file>, where <filter>
-# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
-# input file. Doxygen will then use the output that the filter program writes
-# to standard output.
-# If FILTER_PATTERNS is specified, this tag will be ignored.
-# Note that the filter must not add or remove lines; it is applied before the
-# code is scanned, but not when the output code is generated. If lines are added
-# or removed, the anchors will not be placed correctly.
-
-INPUT_FILTER =
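The note above that a filter must not add or remove lines is the important constraint; a sketch of a line-preserving filter, again hypothetical, could look like this.

    #!/usr/bin/env python
    # Hypothetical INPUT_FILTER helper. Doxygen runs it as <filter> <input-file>
    # and reads the filtered source from standard output. The rewrite below is
    # strictly line-for-line, so source anchors remain valid.
    import sys

    if __name__ == "__main__":
        with open(sys.argv[1]) as source:
            for line in source:
                # Example rewrite: trim trailing whitespace, never adding or
                # dropping a line.
                sys.stdout.write(line.rstrip() + "\n")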
-
-# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
-# basis.
-# Doxygen will compare the file name with each pattern and apply the
-# filter if there is a match.
-# The filters are a list of the form:
-# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
-# info on how filters are used. If FILTER_PATTERNS is empty or if
-# none of the patterns match the file name, INPUT_FILTER is applied.
-
-FILTER_PATTERNS =
-
-# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER) will be used to filter the input files when producing source
-# files to browse (i.e. when SOURCE_BROWSER is set to YES).
-
-FILTER_SOURCE_FILES = NO
-
-# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
-# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
-# and it is also possible to disable source filtering for a specific pattern
-# using *.ext= (so without naming a filter). This option only has effect when
-# FILTER_SOURCE_FILES is enabled.
-
-FILTER_SOURCE_PATTERNS =
-
-# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
-# is part of the input, its contents will be placed on the main page
-# (index.html). This can be useful if you have a project on for instance GitHub
-# and want to reuse the introduction page also for the doxygen output.
-
-USE_MDFILE_AS_MAINPAGE =
-
-#---------------------------------------------------------------------------
-# configuration options related to source browsing
-#---------------------------------------------------------------------------
-
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will
-# be generated. Documented entities will be cross-referenced with these sources.
-# Note: To get rid of all source code in the generated output, make sure also
-# VERBATIM_HEADERS is set to NO.
-
-SOURCE_BROWSER = NO
-
-# Setting the INLINE_SOURCES tag to YES will include the body
-# of functions and classes directly in the documentation.
-
-INLINE_SOURCES = NO
-
-# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
-# doxygen to hide any special comment blocks from generated source code
-# fragments. Normal C, C++ and Fortran comments will always remain visible.
-
-STRIP_CODE_COMMENTS = YES
-
-# If the REFERENCED_BY_RELATION tag is set to YES
-# then for each documented function all documented
-# functions referencing it will be listed.
-
-REFERENCED_BY_RELATION = NO
-
-# If the REFERENCES_RELATION tag is set to YES
-# then for each documented function all documented entities
-# called/used by that function will be listed.
-
-REFERENCES_RELATION = NO
-
-# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
-# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
-# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
-# link to the source code.
-# Otherwise they will link to the documentation.
-
-REFERENCES_LINK_SOURCE = YES
-
-# If the USE_HTAGS tag is set to YES then the references to source code
-# will point to the HTML generated by the htags(1) tool instead of doxygen
-# built-in source browser. The htags tool is part of GNU's global source
-# tagging system (see http://www.gnu.org/software/global/global.html). You
-# will need version 4.8.6 or higher.
-
-USE_HTAGS = NO
-
-# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
-# will generate a verbatim copy of the header file for each class for
-# which an include is specified. Set to NO to disable this.
-
-VERBATIM_HEADERS = YES
-
-#---------------------------------------------------------------------------
-# configuration options related to the alphabetical class index
-#---------------------------------------------------------------------------
-
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
-# of all compounds will be generated. Enable this if the project
-# contains a lot of classes, structs, unions or interfaces.
-
-ALPHABETICAL_INDEX = NO
-
-# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
-# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
-# in which this list will be split (can be a number in the range [1..20])
-
-COLS_IN_ALPHA_INDEX = 5
-
-# In case all classes in a project start with a common prefix, all
-# classes will be put under the same header in the alphabetical index.
-# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
-# should be ignored while generating the index headers.
-
-IGNORE_PREFIX =
-
-#---------------------------------------------------------------------------
-# configuration options related to the HTML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
-# generate HTML output.
-
-GENERATE_HTML = NO
-
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `html' will be used as the default path.
-
-HTML_OUTPUT = html
-
-# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
-# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
-# doxygen will generate files with .html extension.
-
-HTML_FILE_EXTENSION = .html
-
-# The HTML_HEADER tag can be used to specify a personal HTML header for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard header. Note that when using a custom header you are responsible
-# for the proper inclusion of any scripts and style sheets that doxygen
-# needs, which is dependent on the configuration options used.
-# It is advised to generate a default header using "doxygen -w html
-# header.html footer.html stylesheet.css YourConfigFile" and then modify
-# that header. Note that the header is subject to change so you typically
-# have to redo this when upgrading to a newer version of doxygen or when
-# changing the value of configuration settings such as GENERATE_TREEVIEW!
-
-HTML_HEADER =
-
-# The HTML_FOOTER tag can be used to specify a personal HTML footer for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard footer.
-
-HTML_FOOTER =
-
-# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
-# style sheet that is used by each HTML page. It can be used to
-# fine-tune the look of the HTML output. If left blank doxygen will
-# generate a default style sheet. Note that it is recommended to use
-# HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this
-# tag will in the future become obsolete.
-
-HTML_STYLESHEET =
-
-# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional
-# user-defined cascading style sheet that is included after the standard
-# style sheets created by doxygen. Using this option one can overrule
-# certain style aspects. This is preferred over using HTML_STYLESHEET
-# since it does not replace the standard style sheet and is therefore more
-# robust against future updates. Doxygen will copy the style sheet file to
-# the output directory.
-
-HTML_EXTRA_STYLESHEET =
-
-# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
-# other source files which should be copied to the HTML output directory. Note
-# that these files will be copied to the base HTML output directory. Use the
-# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
-# files. In the HTML_STYLESHEET file, use the file name only. Also note that
-# the files will be copied as-is; there are no commands or markers available.
-
-HTML_EXTRA_FILES =
-
-# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
-# Doxygen will adjust the colors in the style sheet and background images
-# according to this color. Hue is specified as an angle on a colorwheel,
-# see http://en.wikipedia.org/wiki/Hue for more information.
-# For instance the value 0 represents red, 60 is yellow, 120 is green,
-# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
-# The allowed range is 0 to 359.
-
-HTML_COLORSTYLE_HUE = 220
-
-# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
-# the colors in the HTML output. For a value of 0 the output will use
-# grayscales only. A value of 255 will produce the most vivid colors.
-
-HTML_COLORSTYLE_SAT = 100
-
-# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
-# the luminance component of the colors in the HTML output. Values below
-# 100 gradually make the output lighter, whereas values above 100 make
-# the output darker. The value divided by 100 is the actual gamma applied,
-# so 80 represents a gamma of 0.8. The value 220 represents a gamma of 2.2,
-# and 100 does not change the gamma.
-
-HTML_COLORSTYLE_GAMMA = 80
-
-# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
-# page will contain the date and time when the page was generated. Setting
-# this to NO can help when comparing the output of multiple runs.
-
-HTML_TIMESTAMP = NO
-
-# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
-# documentation will contain sections that can be hidden and shown after the
-# page has loaded.
-
-HTML_DYNAMIC_SECTIONS = NO
-
-# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of
-# entries shown in the various tree structured indices initially; the user
-# can expand and collapse entries dynamically later on. Doxygen will expand
-# the tree to such a level that at most the specified number of entries are
-# visible (unless a fully collapsed tree already exceeds this amount).
-# So setting the number of entries to 1 will produce a fully collapsed tree by
-# default. 0 is a special value representing an infinite number of entries
-# and will result in a fully expanded tree by default.
-
-HTML_INDEX_NUM_ENTRIES = 100
-
-# If the GENERATE_DOCSET tag is set to YES, additional index files
-# will be generated that can be used as input for Apple's Xcode 3
-# integrated development environment, introduced with OSX 10.5 (Leopard).
-# To create a documentation set, doxygen will generate a Makefile in the
-# HTML output directory. Running make will produce the docset in that
-# directory and running "make install" will install the docset in
-# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
-# it at startup.
-# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
-# for more information.
-
-GENERATE_DOCSET = NO
-
-# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
-# feed. A documentation feed provides an umbrella under which multiple
-# documentation sets from a single provider (such as a company or product suite)
-# can be grouped.
-
-DOCSET_FEEDNAME = "Doxygen generated docs"
-
-# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
-# should uniquely identify the documentation set bundle. This should be a
-# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
-# will append .docset to the name.
-
-DOCSET_BUNDLE_ID = org.doxygen.Project
-
-# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely
-# identify the documentation publisher. This should be a reverse domain-name
-# style string, e.g. com.mycompany.MyDocSet.documentation.
-
-DOCSET_PUBLISHER_ID = org.doxygen.Publisher
-
-# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher.
-
-DOCSET_PUBLISHER_NAME = Publisher
-
-# If the GENERATE_HTMLHELP tag is set to YES, additional index files
-# will be generated that can be used as input for tools like the
-# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
-# of the generated HTML documentation.
-
-GENERATE_HTMLHELP = NO
-
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
-# be used to specify the file name of the resulting .chm file. You
-# can add a path in front of the file if the result should not be
-# written to the html output directory.
-
-CHM_FILE =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
-# be used to specify the location (absolute path including file name) of
-# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
-# the HTML help compiler on the generated index.hhp.
-
-HHC_LOCATION =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
-# controls if a separate .chi index file is generated (YES) or that
-# it should be included in the master .chm file (NO).
-
-GENERATE_CHI = NO
-
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
-# is used to encode HtmlHelp index (hhk), content (hhc) and project file
-# content.
-
-CHM_INDEX_ENCODING =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
-# controls whether a binary table of contents is generated (YES) or a
-# normal table of contents (NO) in the .chm file.
-
-BINARY_TOC = NO
-
-# The TOC_EXPAND flag can be set to YES to add extra items for group members
-# to the contents of the HTML help documentation and to the tree view.
-
-TOC_EXPAND = NO
-
-# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
-# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
-# that can be used as input for Qt's qhelpgenerator to generate a
-# Qt Compressed Help (.qch) of the generated HTML documentation.
-
-GENERATE_QHP = NO
-
-# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
-# be used to specify the file name of the resulting .qch file.
-# The path specified is relative to the HTML output folder.
-
-QCH_FILE =
-
-# The QHP_NAMESPACE tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#namespace
-
-QHP_NAMESPACE =
-
-# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#virtual-folders
-
-QHP_VIRTUAL_FOLDER = doc
-
-# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
-# add. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#custom-filters
-
-QHP_CUST_FILTER_NAME =
-
-# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
-# custom filter to add. For more information please see
-# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
-# Qt Help Project / Custom Filters</a>.
-
-QHP_CUST_FILTER_ATTRS =
-
-# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
-# project's
-# filter section matches.
-# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
-# Qt Help Project / Filter Attributes</a>.
-
-QHP_SECT_FILTER_ATTRS =
-
-# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
-# be used to specify the location of Qt's qhelpgenerator.
-# If non-empty doxygen will try to run qhelpgenerator on the generated
-# .qhp file.
-
-QHG_LOCATION =
-
-# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
-# will be generated, which together with the HTML files, form an Eclipse help
-# plugin. To install this plugin and make it available under the help contents
-# menu in Eclipse, the contents of the directory containing the HTML and XML
-# files need to be copied into the plugins directory of eclipse. The name of
-# the directory within the plugins directory should be the same as
-# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
-# the help appears.
-
-GENERATE_ECLIPSEHELP = NO
-
-# A unique identifier for the eclipse help plugin. When installing the plugin
-# the directory name containing the HTML and XML files should also have
-# this name.
-
-ECLIPSE_DOC_ID = org.doxygen.Project
-
-# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs)
-# at top of each HTML page. The value NO (the default) enables the index and
-# the value YES disables it. Since the tabs have the same information as the
-# navigation tree you can set this option to YES if you already set
-# GENERATE_TREEVIEW to YES.
-
-DISABLE_INDEX = NO
-
-# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
-# structure should be generated to display hierarchical information.
-# If the tag value is set to YES, a side panel will be generated
-# containing a tree-like index structure (just like the one that
-# is generated for HTML Help). For this to work a browser that supports
-# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
-# Windows users are probably better off using the HTML help feature.
-# Since the tree basically has the same information as the tab index you
-# could consider setting DISABLE_INDEX to YES when enabling this option.
-
-GENERATE_TREEVIEW = NO
-
-# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
-# (range [0,1..20]) that doxygen will group on one line in the generated HTML
-# documentation. Note that a value of 0 will completely suppress the enum
-# values from appearing in the overview section.
-
-ENUM_VALUES_PER_LINE = 4
-
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
-# used to set the initial width (in pixels) of the frame in which the tree
-# is shown.
-
-TREEVIEW_WIDTH = 250
-
-# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
-# links to external symbols imported via tag files in a separate window.
-
-EXT_LINKS_IN_WINDOW = NO
-
-# Use this tag to change the font size of Latex formulas included
-# as images in the HTML documentation. The default is 10. Note that
-# when you change the font size after a successful doxygen run you need
-# to manually remove any form_*.png images from the HTML output directory
-# to force them to be regenerated.
-
-FORMULA_FONTSIZE = 10
-
-# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are
-# not supported properly for IE 6.0, but are supported on all modern browsers.
-# Note that when changing this option you need to delete any form_*.png files
-# in the HTML output before the changes have effect.
-
-FORMULA_TRANSPARENT = YES
-
-# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
-# (see http://www.mathjax.org) which uses client side Javascript for the
-# rendering instead of using prerendered bitmaps. Use this if you do not
-# have LaTeX installed or if you want the formulas to look prettier in the HTML
-# output. When enabled you may also need to install MathJax separately and
-# configure the path to it using the MATHJAX_RELPATH option.
-
-USE_MATHJAX = NO
-
-# When MathJax is enabled you can set the default output format to be used for
-# the MathJax output. Supported types are HTML-CSS, NativeMML (i.e. MathML) and
-# SVG. The default value is HTML-CSS, which is slower, but has the best
-# compatibility.
-
-MATHJAX_FORMAT = HTML-CSS
-
-# When MathJax is enabled you need to specify the location relative to the
-# HTML output directory using the MATHJAX_RELPATH option. The destination
-# directory should contain the MathJax.js script. For instance, if the mathjax
-# directory is located at the same level as the HTML output directory, then
-# MATHJAX_RELPATH should be ../mathjax. The default value points to
-# the MathJax Content Delivery Network so you can quickly see the result without
-# installing MathJax.
-# However, it is strongly recommended to install a local
-# copy of MathJax from http://www.mathjax.org before deployment.
-
-MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
-
-# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax extension
-# names that should be enabled during MathJax rendering.
-
-MATHJAX_EXTENSIONS =
-
-# The MATHJAX_CODEFILE tag can be used to specify a file with javascript
-# pieces of code that will be used on startup of the MathJax code.
-
-MATHJAX_CODEFILE =
-
-# When the SEARCHENGINE tag is enabled doxygen will generate a search box
-# for the HTML output. The underlying search engine uses javascript
-# and DHTML and should work on any modern browser. Note that when using
-# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
-# (GENERATE_DOCSET) there is already a search function so this one should
-# typically be disabled. For large projects the javascript based search engine
-# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.
-
-SEARCHENGINE = YES
-
-# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
-# implemented using a web server instead of a web client using Javascript.
-# There are two flavours of web server based search depending on the
-# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for
-# searching and an index file used by the script. When EXTERNAL_SEARCH is
-# enabled the indexing and searching needs to be provided by external tools.
-# See the manual for details.
-
-SERVER_BASED_SEARCH = NO
-
-# When EXTERNAL_SEARCH is enabled doxygen will no longer generate the PHP
-# script for searching. Instead the search results are written to an XML file
-# which needs to be processed by an external indexer. Doxygen will invoke an
-# external search engine pointed to by the SEARCHENGINE_URL option to obtain
-# the search results. Doxygen ships with an example indexer (doxyindexer) and
-# search engine (doxysearch.cgi) which are based on the open source search
-# engine library Xapian. See the manual for configuration details.
-
-EXTERNAL_SEARCH = NO
-
-# The SEARCHENGINE_URL should point to a search engine hosted by a web server
-# which will return the search results when EXTERNAL_SEARCH is enabled.
-# Doxygen ships with an example search engine (doxysearch) which is based on
-# the open source search engine library Xapian. See the manual for configuration
-# details.
-
-SEARCHENGINE_URL =
-
-# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
-# search data is written to a file for indexing by an external tool. With the
-# SEARCHDATA_FILE tag the name of this file can be specified.
-
-SEARCHDATA_FILE = searchdata.xml
-
-# When SERVER_BASED_SEARCH AND EXTERNAL_SEARCH are both enabled the
-# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
-# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
-# projects and redirect the results back to the right project.
-
-EXTERNAL_SEARCH_ID =
-
-# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
-# projects other than the one defined by this configuration file, but that are
-# all added to the same external search index. Each project needs to have a
-# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id
-# to a relative location where the documentation can be found.
-# The format is: EXTRA_SEARCH_MAPPINGS = id1=loc1 id2=loc2 ...
-
-EXTRA_SEARCH_MAPPINGS =
-
-#---------------------------------------------------------------------------
-# configuration options related to the LaTeX output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
-# generate Latex output.
-
-GENERATE_LATEX = NO
-
-# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `latex' will be used as the default path.
-
-LATEX_OUTPUT = latex
-
-# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
-# invoked. If left blank `latex' will be used as the default command name.
-# Note that when enabling USE_PDFLATEX this option is only used for
-# generating bitmaps for formulas in the HTML output, but not in the
-# Makefile that is written to the output directory.
-
-LATEX_CMD_NAME = latex
-
-# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
-# generate index for LaTeX. If left blank `makeindex' will be used as the
-# default command name.
-
-MAKEINDEX_CMD_NAME = makeindex
-
-# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
-# LaTeX documents. This may be useful for small projects and may help to
-# save some trees in general.
-
-COMPACT_LATEX = NO
-
-# The PAPER_TYPE tag can be used to set the paper type that is used
-# by the printer. Possible values are: a4, letter, legal and
-# executive. If left blank a4 will be used.
-
-PAPER_TYPE = a4wide
-
-# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
-# packages that should be included in the LaTeX output.
-
-EXTRA_PACKAGES =
-
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
-# the generated latex document. The header should contain everything until
-# the first chapter. If it is left blank doxygen will generate a
-# standard header. Notice: only use this tag if you know what you are doing!
-
-LATEX_HEADER =
-
-# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
-# the generated latex document. The footer should contain everything after
-# the last chapter. If it is left blank doxygen will generate a
-# standard footer. Notice: only use this tag if you know what you are doing!
-
-LATEX_FOOTER =
-
-# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images
-# or other source files which should be copied to the LaTeX output directory.
-# Note that the files will be copied as-is; there are no commands or markers
-# available.
-
-LATEX_EXTRA_FILES =
-
-# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
-# is prepared for conversion to pdf (using ps2pdf). The pdf file will
-# contain links (just like the HTML output) instead of page references
-# This makes the output suitable for online browsing using a pdf viewer.
-
-PDF_HYPERLINKS = YES
-
-# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
-# plain latex in the generated Makefile. Set this option to YES to get a
-# higher quality PDF documentation.
-
-USE_PDFLATEX = YES
-
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
-# command to the generated LaTeX files. This will instruct LaTeX to keep
-# running if errors occur, instead of asking the user for help.
-# This option is also used when generating formulas in HTML.
-
-LATEX_BATCHMODE = NO
-
-# If LATEX_HIDE_INDICES is set to YES then doxygen will not
-# include the index chapters (such as File Index, Compound Index, etc.)
-# in the output.
-
-LATEX_HIDE_INDICES = NO
-
-# If LATEX_SOURCE_CODE is set to YES then doxygen will include
-# source code with syntax highlighting in the LaTeX output.
-# Note that which sources are shown also depends on other settings
-# such as SOURCE_BROWSER.
-
-LATEX_SOURCE_CODE = NO
-
-# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
-# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See
-# http://en.wikipedia.org/wiki/BibTeX for more info.
-
-LATEX_BIB_STYLE = plain
-
-#---------------------------------------------------------------------------
-# configuration options related to the RTF output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
-# The RTF output is optimized for Word 97 and may not look very pretty with
-# other RTF readers or editors.
-
-GENERATE_RTF = NO
-
-# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `rtf' will be used as the default path.
-
-RTF_OUTPUT = rtf
-
-# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
-# RTF documents. This may be useful for small projects and may help to
-# save some trees in general.
-
-COMPACT_RTF = NO
-
-# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
-# will contain hyperlink fields. The RTF file will
-# contain links (just like the HTML output) instead of page references.
-# This makes the output suitable for online browsing using WORD or other
-# programs which support those fields.
-# Note: wordpad (write) and others do not support links.
-
-RTF_HYPERLINKS = NO
-
-# Load style sheet definitions from file. Syntax is similar to doxygen's
-# config file, i.e. a series of assignments. You only have to provide
-# replacements, missing definitions are set to their default value.
-
-RTF_STYLESHEET_FILE =
-
-# Set optional variables used in the generation of an rtf document.
-# Syntax is similar to doxygen's config file.
-
-RTF_EXTENSIONS_FILE =
-
-#---------------------------------------------------------------------------
-# configuration options related to the man page output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
-# generate man pages
-
-GENERATE_MAN = NO
-
-# The MAN_OUTPUT tag is used to specify where the man pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `man' will be used as the default path.
-
-MAN_OUTPUT = man
-
-# The MAN_EXTENSION tag determines the extension that is added to
-# the generated man pages (default is the subroutine's section .3)
-
-MAN_EXTENSION = .3
-
-# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
-# then it will generate one additional man file for each entity
-# documented in the real man page(s). These additional files
-# only source the real man page, but without them the man command
-# would be unable to find the correct page. The default is NO.
-
-MAN_LINKS = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the XML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_XML tag is set to YES Doxygen will
-# generate an XML file that captures the structure of
-# the code including all documentation.
-
-GENERATE_XML = YES
-
-# The XML_OUTPUT tag is used to specify where the XML pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `xml' will be used as the default path.
-
-XML_OUTPUT = xml
-
-# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
-# dump the program listings (including syntax highlighting
-# and cross-referencing information) to the XML output. Note that
-# enabling this will significantly increase the size of the XML output.
-
-XML_PROGRAMLISTING = YES
-
-#---------------------------------------------------------------------------
-# configuration options related to the DOCBOOK output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_DOCBOOK tag is set to YES Doxygen will generate DOCBOOK files
-# that can be used to generate PDF.
-
-GENERATE_DOCBOOK = NO
-
-# The DOCBOOK_OUTPUT tag is used to specify where the DOCBOOK pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
-# front of it. If left blank docbook will be used as the default path.
-
-DOCBOOK_OUTPUT = docbook
-
-#---------------------------------------------------------------------------
-# configuration options for the AutoGen Definitions output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
-# generate an AutoGen Definitions (see autogen.sf.net) file
-# that captures the structure of the code including all
-# documentation. Note that this feature is still experimental
-# and incomplete at the moment.
-
-GENERATE_AUTOGEN_DEF = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the Perl module output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_PERLMOD tag is set to YES Doxygen will
-# generate a Perl module file that captures the structure of
-# the code including all documentation. Note that this
-# feature is still experimental and incomplete at the
-# moment.
-
-GENERATE_PERLMOD = NO
-
-# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
-# the necessary Makefile rules, Perl scripts and LaTeX code to be able
-# to generate PDF and DVI output from the Perl module output.
-
-PERLMOD_LATEX = NO
-
-# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
-# nicely formatted so it can be parsed by a human reader. This is useful
-# if you want to understand what is going on. On the other hand, if this
-# tag is set to NO the size of the Perl module output will be much smaller
-# and Perl will parse it just the same.
-
-PERLMOD_PRETTY = YES
-
-# The names of the make variables in the generated doxyrules.make file
-# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
-# This is useful so different doxyrules.make files included by the same
-# Makefile don't overwrite each other's variables.
-
-PERLMOD_MAKEVAR_PREFIX =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the preprocessor
-#---------------------------------------------------------------------------
-
-# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
-# evaluate all C-preprocessor directives found in the sources and include
-# files.
-
-ENABLE_PREPROCESSING = YES
-
-# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
-# names in the source code. If set to NO (the default) only conditional
-# compilation will be performed. Macro expansion can be done in a controlled
-# way by setting EXPAND_ONLY_PREDEF to YES.
-
-MACRO_EXPANSION = YES
-
-# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
-# then the macro expansion is limited to the macros specified with the
-# PREDEFINED and EXPAND_AS_DEFINED tags.
-
-EXPAND_ONLY_PREDEF = NO
-
-# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
-# pointed to by INCLUDE_PATH will be searched when a #include is found.
-
-SEARCH_INCLUDES = YES
-
-# The INCLUDE_PATH tag can be used to specify one or more directories that
-# contain include files that are not input files but should be processed by
-# the preprocessor.
-
-INCLUDE_PATH =
-
-# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
-# patterns (like *.h and *.hpp) to filter out the header-files in the
-# directories. If left blank, the patterns specified with FILE_PATTERNS will
-# be used.
-
-INCLUDE_FILE_PATTERNS =
-
-# The PREDEFINED tag can be used to specify one or more macro names that
-# are defined before the preprocessor is started (similar to the -D option of
-# gcc). The argument of the tag is a list of macros of the form: name
-# or name=definition (no spaces). If the definition and the = are
-# omitted =1 is assumed. To prevent a macro definition from being
-# undefined via #undef or recursively expanded use the := operator
-# instead of the = operator.
-
-PREDEFINED =
-
-# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
-# this tag can be used to specify a list of macro names that should be expanded.
-# The macro definition that is found in the sources will be used.
-# Use the PREDEFINED tag if you want to use a different macro definition that
-# overrules the definition found in the source code.
-
-EXPAND_AS_DEFINED =
-
-# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
-# doxygen's preprocessor will remove all references to function-like macros
-# that are alone on a line, have an all uppercase name, and do not end with a
-# semicolon, because these will confuse the parser if not removed.
-
-SKIP_FUNCTION_MACROS = YES
-
-#---------------------------------------------------------------------------
-# Configuration::additions related to external references
-#---------------------------------------------------------------------------
-
-# The TAGFILES option can be used to specify one or more tagfiles. For each
-# tag file the location of the external documentation should be added. The
-# format of a tag file without this location is as follows:
-#
-# TAGFILES = file1 file2 ...
-# Adding location for the tag files is done as follows:
-#
-# TAGFILES = file1=loc1 "file2 = loc2" ...
-# where "loc1" and "loc2" can be relative or absolute paths
-# or URLs. Note that each tag file must have a unique name (where the name does
-# NOT include the path). If a tag file is not located in the directory in which
-# doxygen is run, you must also specify the path to the tagfile here.
-
-TAGFILES =
-
-# When a file name is specified after GENERATE_TAGFILE, doxygen will create
-# a tag file that is based on the input files it reads.
-
-GENERATE_TAGFILE =
-
-# If the ALLEXTERNALS tag is set to YES all external classes will be listed
-# in the class index. If set to NO only the inherited external classes
-# will be listed.
-
-ALLEXTERNALS = NO
-
-# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
-# in the modules index. If set to NO, only the current project's groups will
-# be listed.
-
-EXTERNAL_GROUPS = YES
-
-# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed
-# in the related pages index. If set to NO, only the current project's
-# pages will be listed.
-
-EXTERNAL_PAGES = YES
-
-#---------------------------------------------------------------------------
-# Configuration options related to the dot tool
-#---------------------------------------------------------------------------
-
-# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
-# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
-# or super classes. Setting the tag to NO turns the diagrams off. Note that
-# this option also works with HAVE_DOT disabled, but it is recommended to
-# install and use dot, since it yields more powerful graphs.
-
-CLASS_DIAGRAMS = YES
-
-# If set to YES, the inheritance and collaboration graphs will hide
-# inheritance and usage relations if the target is undocumented
-# or is not a class.
-
-HIDE_UNDOC_RELATIONS = YES
-
-# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
-# available from the path. This tool is part of Graphviz, a graph visualization
-# toolkit from AT&T and Lucent Bell Labs. The other options in this section
-# have no effect if this option is set to NO (the default)
-
-HAVE_DOT = NO
-
-# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
-# allowed to run in parallel. When set to 0 (the default) doxygen will
-# base this on the number of processors available in the system. You can set it
-# explicitly to a value larger than 0 to get control over the balance
-# between CPU load and processing speed.
-
-DOT_NUM_THREADS = 0
-
-# By default doxygen will use the Helvetica font for all dot files that
-# doxygen generates. When you want a differently looking font you can specify
-# the font name using DOT_FONTNAME. You need to make sure dot is able to find
-# the font, which can be done by putting it in a standard location or by setting
-# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
-# directory containing the font.
-
-DOT_FONTNAME = Helvetica
-
-# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
-# The default size is 10pt.
-
-DOT_FONTSIZE = 10
-
-# By default doxygen will tell dot to use the Helvetica font.
-# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to
-# set the path where dot can find it.
-
-DOT_FONTPATH =
-
-# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect inheritance relations. Setting this tag to YES will force the
-# CLASS_DIAGRAMS tag to NO.
-
-CLASS_GRAPH = YES
-
-# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect implementation dependencies (inheritance, containment, and
-# class references variables) of the class with other documented classes.
-
-COLLABORATION_GRAPH = YES
-
-# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for groups, showing the direct groups dependencies
-
-GROUP_GRAPHS = YES
-
-# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
-# collaboration diagrams in a style similar to the OMG's Unified Modeling
-# Language.
-
-UML_LOOK = NO
-
-# If the UML_LOOK tag is enabled, the fields and methods are shown inside
-# the class node. If there are many fields or methods and many nodes the
-# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
-# threshold limits the number of items for each type to make the size more
-# manageable. Set this to 0 for no limit. Note that the threshold may be
-# exceeded by 50% before the limit is enforced.
-
-UML_LIMIT_NUM_FIELDS = 10
-
-# If set to YES, the inheritance and collaboration graphs will show the
-# relations between templates and their instances.
-
-TEMPLATE_RELATIONS = NO
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
-# tags are set to YES then doxygen will generate a graph for each documented
-# file showing the direct and indirect include dependencies of the file with
-# other documented files.
-
-INCLUDE_GRAPH = YES
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
-# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
-# documented header file showing the documented files that directly or
-# indirectly include this file.
-
-INCLUDED_BY_GRAPH = YES
-
-# If the CALL_GRAPH and HAVE_DOT options are set to YES then
-# doxygen will generate a call dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable call graphs
-# for selected functions only using the \callgraph command.
-
-CALL_GRAPH = NO
-
-# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
-# doxygen will generate a caller dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable caller
-# graphs for selected functions only using the \callergraph command.
-
-CALLER_GRAPH = NO
-
-# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
-# will generate a graphical hierarchy of all classes instead of a textual one.
-
-GRAPHICAL_HIERARCHY = YES
-
-# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES
-# then doxygen will show the dependencies a directory has on other directories
-# in a graphical way. The dependency relations are determined by the #include
-# relations between the files in the directories.
-
-DIRECTORY_GRAPH = YES
-
-# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
-# generated by dot. Possible values are svg, png, jpg, or gif.
-# If left blank png will be used. If you choose svg you need to set
-# HTML_FILE_EXTENSION to xhtml in order to make the SVG files
-# visible in IE 9+ (other browsers do not have this requirement).
-
-DOT_IMAGE_FORMAT = png
-
-# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
-# enable generation of interactive SVG images that allow zooming and panning.
-# Note that this requires a modern browser other than Internet Explorer.
-# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you
-# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files
-# visible. Older versions of IE do not have SVG support.
-
-INTERACTIVE_SVG = NO
-
-# The tag DOT_PATH can be used to specify the path where the dot tool can be
-# found. If left blank, it is assumed the dot tool can be found in the path.
-
-DOT_PATH =
-
-# The DOTFILE_DIRS tag can be used to specify one or more directories that
-# contain dot files that are included in the documentation (see the
-# \dotfile command).
-
-DOTFILE_DIRS =
-
-# The MSCFILE_DIRS tag can be used to specify one or more directories that
-# contain msc files that are included in the documentation (see the
-# \mscfile command).
-
-MSCFILE_DIRS =
-
-# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
-# nodes that will be shown in the graph. If the number of nodes in a graph
-# becomes larger than this value, doxygen will truncate the graph, which is
-# visualized by representing a node as a red box. Note that if the
-# number of direct children of the root node in a graph is already larger than
-# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
-# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
-
-DOT_GRAPH_MAX_NODES = 50
-
-# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
-# graphs generated by dot. A depth value of 3 means that only nodes reachable
-# from the root by following a path via at most 3 edges will be shown. Nodes
-# that lie further from the root node will be omitted. Note that setting this
-# option to 1 or 2 may greatly reduce the computation time needed for large
-# code bases. Also note that the size of a graph can be further restricted by
-# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
-
-MAX_DOT_GRAPH_DEPTH = 0
-
-# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
-# background. This is disabled by default, because dot on Windows does not
-# seem to support this out of the box. Warning: Depending on the platform used,
-# enabling this option may lead to badly anti-aliased labels on the edges of
-# a graph (i.e. they become hard to read).
-
-DOT_TRANSPARENT = NO
-
-# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
-# files in one run (i.e. multiple -o and -T options on the command line). This
-# makes dot run faster, but since only newer versions of dot (>1.8.10)
-# support this, this feature is disabled by default.
-
-DOT_MULTI_TARGETS = YES
-
-# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
-# generate a legend page explaining the meaning of the various boxes and
-# arrows in the dot generated graphs.
-
-GENERATE_LEGEND = YES
-
-# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
-# remove the intermediate dot files that are used to generate
-# the various graphs.
-
-DOT_CLEANUP = YES
diff --git a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/__init__.py b/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/__init__.py
deleted file mode 100644
index b9476e6030..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/__init__.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#
-# Copyright 2010 Free Software Foundation, Inc.
-#
-# This file was generated by gr_modtool, a tool from the GNU Radio framework
-# This file is a part of gr-howto
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-"""
-Python interface to contents of doxygen xml documentation.
-
-Example use:
-See the contents of the example folder for the C++ and
-doxygen-generated xml used in this example.
-
->>> # Parse the doxygen docs.
->>> import os
->>> this_dir = os.path.dirname(globals()['__file__'])
->>> xml_path = this_dir + "/example/xml/"
->>> di = DoxyIndex(xml_path)
-
-Get a list of all top-level objects.
-
->>> print([mem.name() for mem in di.members()])
-[u'Aadvark', u'aadvarky_enough', u'main']
-
-Get all functions.
-
->>> print([mem.name() for mem in di.in_category(DoxyFunction)])
-[u'aadvarky_enough', u'main']
-
-Check if an object is present.
-
->>> di.has_member(u'Aadvark')
-True
->>> di.has_member(u'Fish')
-False
-
-Get an item by name and check its properties.
-
->>> aad = di.get_member(u'Aadvark')
->>> print(aad.brief_description)
-Models the mammal Aadvark.
->>> print(aad.detailed_description)
-Sadly the model is incomplete and cannot capture all aspects of an aadvark yet.
-<BLANKLINE>
-This line is uninformative and is only to test line breaks in the comments.
->>> [mem.name() for mem in aad.members()]
-[u'aadvarkness', u'print', u'Aadvark', u'get_aadvarkness']
->>> aad.get_member(u'print').brief_description
-u'Outputs the vital aadvark statistics.'
-
-"""
-from __future__ import unicode_literals
-
-from .doxyindex import DoxyIndex, DoxyFunction, DoxyParam, DoxyClass, DoxyFile, DoxyNamespace, DoxyGroup, DoxyFriend, DoxyOther
-
-def _test():
- import os
- this_dir = os.path.dirname(globals()['__file__'])
- xml_path = this_dir + "/example/xml/"
- di = DoxyIndex(xml_path)
- # Get the Aadvark class
- aad = di.get_member('Aadvark')
- aad.brief_description
- import doctest
- return doctest.testmod()
-
-if __name__ == "__main__":
- _test()
-
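As a hedged sketch of how this interface is meant to be driven (names taken from the docstring above; the xml path and the 'some_block' member are hypothetical, and the package is assumed to be importable as doxyxml):

    from doxyxml import DoxyIndex, DoxyFunction

    # The xml/ directory would normally come from a doxygen run with
    # GENERATE_XML = YES, as configured in the Doxyfile above.
    di = DoxyIndex("build/docs/doxygen/xml/")

    # All top-level objects, as in the doctest above.
    print([mem.name() for mem in di.members()])

    # Only the functions.
    print([mem.name() for mem in di.in_category(DoxyFunction)])

    # Look up a single member by name and show its brief description.
    if di.has_member('some_block'):
        print(di.get_member('some_block').brief_description)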
diff --git a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/base.py b/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/base.py
deleted file mode 100644
index b204c004fe..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/base.py
+++ /dev/null
@@ -1,210 +0,0 @@
-#
-# Copyright 2010 Free Software Foundation, Inc.
-#
-# This file was generated by gr_modtool, a tool from the GNU Radio framework
-# This file is a part of gr-howto
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-"""
-Provides a base class.
-
-Classes derived from it are used to build more user-friendly interfaces
-to the doxygen xml docs than the generated classes provide.
-"""
-from __future__ import print_function
-from __future__ import unicode_literals
-
-import os
-import pdb
-
-from xml.parsers.expat import ExpatError
-
-from .generated import compound
-
-
-class Base(object):
-
- class Duplicate(Exception):
- pass
-
- class NoSuchMember(Exception):
- pass
-
- class ParsingError(Exception):
- pass
-
- def __init__(self, parse_data, top=None):
- self._parsed = False
- self._error = False
- self._parse_data = parse_data
- self._members = []
- self._dict_members = {}
- self._in_category = {}
- self._data = {}
- if top is not None:
- self._xml_path = top._xml_path
- # Set up holder of references
- else:
- top = self
- self._refs = {}
- self._xml_path = parse_data
- self.top = top
-
- @classmethod
- def from_refid(cls, refid, top=None):
- """ Instantiate class from a refid rather than parsing object. """
-        # First check to see if it's already been instantiated.
- if top is not None and refid in top._refs:
- return top._refs[refid]
- # Otherwise create a new instance and set refid.
- inst = cls(None, top=top)
- inst.refid = refid
- inst.add_ref(inst)
- return inst
-
- @classmethod
- def from_parse_data(cls, parse_data, top=None):
- refid = getattr(parse_data, 'refid', None)
- if refid is not None and top is not None and refid in top._refs:
- return top._refs[refid]
- inst = cls(parse_data, top=top)
- if refid is not None:
- inst.refid = refid
- inst.add_ref(inst)
- return inst
-
- def add_ref(self, obj):
- if hasattr(obj, 'refid'):
- self.top._refs[obj.refid] = obj
-
- mem_classes = []
-
- def get_cls(self, mem):
- for cls in self.mem_classes:
- if cls.can_parse(mem):
- return cls
- raise Exception(("Did not find a class for object '%s'." \
- % (mem.get_name())))
-
- def convert_mem(self, mem):
- try:
- cls = self.get_cls(mem)
- converted = cls.from_parse_data(mem, self.top)
- if converted is None:
- raise Exception('No class matched this object.')
- self.add_ref(converted)
- return converted
- except Exception as e:
- print(e)
-
- @classmethod
- def includes(cls, inst):
- return isinstance(inst, cls)
-
- @classmethod
- def can_parse(cls, obj):
- return False
-
- def _parse(self):
- self._parsed = True
-
- def _get_dict_members(self, cat=None):
- """
-        For a given category, return a dictionary mapping member names to
-        members of that category. Names that are duplicated are mapped to
-        the Duplicate exception class.
- """
- self.confirm_no_error()
- if cat not in self._dict_members:
- new_dict = {}
- for mem in self.in_category(cat):
- if mem.name() not in new_dict:
- new_dict[mem.name()] = mem
- else:
- new_dict[mem.name()] = self.Duplicate
- self._dict_members[cat] = new_dict
- return self._dict_members[cat]
-
- def in_category(self, cat):
- self.confirm_no_error()
- if cat is None:
- return self._members
- if cat not in self._in_category:
- self._in_category[cat] = [mem for mem in self._members
- if cat.includes(mem)]
- return self._in_category[cat]
-
- def get_member(self, name, cat=None):
- self.confirm_no_error()
- # Check if it's in a namespace or class.
- bits = name.split('::')
- first = bits[0]
- rest = '::'.join(bits[1:])
- member = self._get_dict_members(cat).get(first, self.NoSuchMember)
- # Raise any errors that are returned.
- if member in set([self.NoSuchMember, self.Duplicate]):
- raise member()
- if rest:
- return member.get_member(rest, cat=cat)
- return member
-
- def has_member(self, name, cat=None):
- try:
- mem = self.get_member(name, cat=cat)
- return True
- except self.NoSuchMember:
- return False
-
- def data(self):
- self.confirm_no_error()
- return self._data
-
- def members(self):
- self.confirm_no_error()
- return self._members
-
- def process_memberdefs(self):
- mdtss = []
- for sec in self._retrieved_data.compounddef.sectiondef:
- mdtss += sec.memberdef
-        # At the moment we lose all information associated with sections.
-        # A memberdef can appear in several sectiondefs, so make sure we
-        # don't add duplicates here.
- uniques = set([])
- for mem in mdtss:
- converted = self.convert_mem(mem)
- pair = (mem.name, mem.__class__)
- if pair not in uniques:
- uniques.add(pair)
- self._members.append(converted)
-
- def retrieve_data(self):
- filename = os.path.join(self._xml_path, self.refid + '.xml')
- try:
- self._retrieved_data = compound.parse(filename)
- except ExpatError:
- print('Error in xml in file %s' % filename)
- self._error = True
- self._retrieved_data = None
-
- def check_parsed(self):
- if not self._parsed:
- self._parse()
-
- def confirm_no_error(self):
- self.check_parsed()
- if self._error:
- raise self.ParsingError()
-
- def error(self):
- self.check_parsed()
- return self._error
-
- def name(self):
- # first see if we can do it without processing.
- if self._parse_data is not None:
- return self._parse_data.name
- self.check_parsed()
- return self._retrieved_data.compounddef.name
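
The base.py removed above is built around a small class registry: each concrete helper appends itself to Base.mem_classes and implements can_parse(), and get_cls()/convert_mem() pick the first registered class that accepts a parsed object. The following is a minimal standalone sketch of that pattern; ToyBase and ToyFunction are hypothetical names for illustration only, not part of doxyxml.

# Minimal sketch of the registry pattern used by doxyxml's Base class.
# ToyBase and ToyFunction are hypothetical names for illustration only.
class ToyBase:
    mem_classes = []              # registry shared by all helper classes

    @classmethod
    def can_parse(cls, obj):      # overridden by each registered helper
        return False

    @classmethod
    def get_cls(cls, obj):
        for candidate in cls.mem_classes:
            if candidate.can_parse(obj):
                return candidate
        raise Exception("Did not find a class for object %r." % (obj,))

class ToyFunction(ToyBase):
    @classmethod
    def can_parse(cls, obj):
        return obj.get("kind") == "function"

ToyBase.mem_classes.append(ToyFunction)

if __name__ == "__main__":
    # Resolves to ToyFunction because its can_parse() matches the 'kind' field.
    print(ToyBase.get_cls({"kind": "function", "name": "main"}))
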
diff --git a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/doxyindex.py b/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/doxyindex.py
deleted file mode 100644
index 5da281a7f7..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/doxyindex.py
+++ /dev/null
@@ -1,292 +0,0 @@
-#
-# Copyright 2010 Free Software Foundation, Inc.
-#
-# This file was generated by gr_modtool, a tool from the GNU Radio framework
-# This file is a part of gr-howto
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-"""
-Classes that provide more user-friendly interfaces to the doxygen xml
-docs than the generated classes do.
-"""
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import os
-
-from .generated import index
-from .base import Base
-from .text import description
-
-class DoxyIndex(Base):
- """
- Parses a doxygen xml directory.
- """
-
- __module__ = "gnuradio.utils.doxyxml"
-
- def _parse(self):
- if self._parsed:
- return
- super(DoxyIndex, self)._parse()
- self._root = index.parse(os.path.join(self._xml_path, 'index.xml'))
- for mem in self._root.compound:
- converted = self.convert_mem(mem)
- # For files and namespaces we want the contents to be
- # accessible directly from the parent rather than having
- # to go through the file object.
- if self.get_cls(mem) == DoxyFile:
- if mem.name.endswith('.h'):
- self._members += converted.members()
- self._members.append(converted)
- elif self.get_cls(mem) == DoxyNamespace:
- self._members += converted.members()
- self._members.append(converted)
- else:
- self._members.append(converted)
-
-
-def generate_swig_doc_i(self):
- """
- %feature("docstring") gr_make_align_on_samplenumbers_ss::align_state "
- Wraps the C++: gr_align_on_samplenumbers_ss::align_state";
- """
- pass
-
-
-class DoxyCompMem(Base):
-
-
- kind = None
-
- def __init__(self, *args, **kwargs):
- super(DoxyCompMem, self).__init__(*args, **kwargs)
-
- @classmethod
- def can_parse(cls, obj):
- return obj.kind == cls.kind
-
- def set_descriptions(self, parse_data):
- bd = description(getattr(parse_data, 'briefdescription', None))
- dd = description(getattr(parse_data, 'detaileddescription', None))
- self._data['brief_description'] = bd
- self._data['detailed_description'] = dd
-
- def set_parameters(self, data):
- vs = [ddc.value for ddc in data.detaileddescription.content_]
- pls = []
- for v in vs:
- if hasattr(v, 'parameterlist'):
- pls += v.parameterlist
- pis = []
- for pl in pls:
- pis += pl.parameteritem
- dpis = []
- for pi in pis:
- dpi = DoxyParameterItem(pi)
- dpi._parse()
- dpis.append(dpi)
- self._data['params'] = dpis
-
-
-class DoxyCompound(DoxyCompMem):
- pass
-
-class DoxyMember(DoxyCompMem):
- pass
-
-class DoxyFunction(DoxyMember):
-
- __module__ = "gnuradio.utils.doxyxml"
-
- kind = 'function'
-
- def _parse(self):
- if self._parsed:
- return
- super(DoxyFunction, self)._parse()
- self.set_descriptions(self._parse_data)
- self.set_parameters(self._parse_data)
- if not self._data['params']:
- # If the params weren't set by a comment then just grab the names.
- self._data['params'] = []
- prms = self._parse_data.param
- for prm in prms:
- self._data['params'].append(DoxyParam(prm))
-
- brief_description = property(lambda self: self.data()['brief_description'])
- detailed_description = property(lambda self: self.data()['detailed_description'])
- params = property(lambda self: self.data()['params'])
-
-Base.mem_classes.append(DoxyFunction)
-
-
-class DoxyParam(DoxyMember):
-
- __module__ = "gnuradio.utils.doxyxml"
-
- def _parse(self):
- if self._parsed:
- return
- super(DoxyParam, self)._parse()
- self.set_descriptions(self._parse_data)
- self._data['declname'] = self._parse_data.declname
-
- @property
- def description(self):
- descriptions = []
- if self.brief_description:
- descriptions.append(self.brief_description)
- if self.detailed_description:
- descriptions.append(self.detailed_description)
- return '\n\n'.join(descriptions)
-
- brief_description = property(lambda self: self.data()['brief_description'])
- detailed_description = property(lambda self: self.data()['detailed_description'])
- name = property(lambda self: self.data()['declname'])
-
-class DoxyParameterItem(DoxyMember):
- """A different representation of a parameter in Doxygen."""
-
- def _parse(self):
- if self._parsed:
- return
- super(DoxyParameterItem, self)._parse()
- names = []
- for nl in self._parse_data.parameternamelist:
- for pn in nl.parametername:
- names.append(description(pn))
- # Just take first name
- self._data['name'] = names[0]
- # Get description
- pd = description(self._parse_data.get_parameterdescription())
- self._data['description'] = pd
-
- description = property(lambda self: self.data()['description'])
- name = property(lambda self: self.data()['name'])
-
-
-class DoxyClass(DoxyCompound):
-
- __module__ = "gnuradio.utils.doxyxml"
-
- kind = 'class'
-
- def _parse(self):
- if self._parsed:
- return
- super(DoxyClass, self)._parse()
- self.retrieve_data()
- if self._error:
- return
- self.set_descriptions(self._retrieved_data.compounddef)
- self.set_parameters(self._retrieved_data.compounddef)
-        # Sectiondef.kind indicates whether the section is private or public;
-        # we just ignore this for now.
- self.process_memberdefs()
-
- brief_description = property(lambda self: self.data()['brief_description'])
- detailed_description = property(lambda self: self.data()['detailed_description'])
- params = property(lambda self: self.data()['params'])
-
-Base.mem_classes.append(DoxyClass)
-
-
-class DoxyFile(DoxyCompound):
-
- __module__ = "gnuradio.utils.doxyxml"
-
- kind = 'file'
-
- def _parse(self):
- if self._parsed:
- return
- super(DoxyFile, self)._parse()
- self.retrieve_data()
- self.set_descriptions(self._retrieved_data.compounddef)
- if self._error:
- return
- self.process_memberdefs()
-
- brief_description = property(lambda self: self.data()['brief_description'])
- detailed_description = property(lambda self: self.data()['detailed_description'])
-
-Base.mem_classes.append(DoxyFile)
-
-
-class DoxyNamespace(DoxyCompound):
-
- __module__ = "gnuradio.utils.doxyxml"
-
- kind = 'namespace'
-
- def _parse(self):
- if self._parsed:
- return
- super(DoxyNamespace, self)._parse()
- self.retrieve_data()
- self.set_descriptions(self._retrieved_data.compounddef)
- if self._error:
- return
- self.process_memberdefs()
-
-Base.mem_classes.append(DoxyNamespace)
-
-
-class DoxyGroup(DoxyCompound):
-
- __module__ = "gnuradio.utils.doxyxml"
-
- kind = 'group'
-
- def _parse(self):
- if self._parsed:
- return
- super(DoxyGroup, self)._parse()
- self.retrieve_data()
- if self._error:
- return
- cdef = self._retrieved_data.compounddef
- self._data['title'] = description(cdef.title)
- # Process inner groups
- grps = cdef.innergroup
- for grp in grps:
- converted = DoxyGroup.from_refid(grp.refid, top=self.top)
- self._members.append(converted)
- # Process inner classes
- klasses = cdef.innerclass
- for kls in klasses:
- converted = DoxyClass.from_refid(kls.refid, top=self.top)
- self._members.append(converted)
- # Process normal members
- self.process_memberdefs()
-
- title = property(lambda self: self.data()['title'])
-
-
-Base.mem_classes.append(DoxyGroup)
-
-
-class DoxyFriend(DoxyMember):
-
- __module__ = "gnuradio.utils.doxyxml"
-
- kind = 'friend'
-
-Base.mem_classes.append(DoxyFriend)
-
-
-class DoxyOther(Base):
-
- __module__ = "gnuradio.utils.doxyxml"
-
- kinds = set(['variable', 'struct', 'union', 'define', 'typedef', 'enum',
- 'dir', 'page', 'signal', 'slot', 'property'])
-
- @classmethod
- def can_parse(cls, obj):
- return obj.kind in cls.kinds
-
-Base.mem_classes.append(DoxyOther)
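
For orientation, a short usage sketch of the doxyindex.py classes removed above. It assumes the doxyxml package directory is importable (e.g. added to sys.path) and that xml_path points at a directory of doxygen-generated XML such as the bundled example; both values are assumptions, not paths taken from this tree.

# Hedged usage sketch for DoxyIndex/DoxyFunction (assumed import path).
from doxyxml import DoxyIndex, DoxyFunction

xml_path = "example/xml/"          # assumed doxygen XML output directory
di = DoxyIndex(xml_path)

for fn in di.in_category(DoxyFunction):
    print(fn.name(), "-", fn.brief_description)
    for p in fn.params:            # DoxyParam / DoxyParameterItem objects
        print("   param:", p.name)

This mirrors the doctest in the package __init__.py, adding the params property that DoxyFunction exposes.
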
diff --git a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/generated/__init__.py b/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/generated/__init__.py
deleted file mode 100644
index 23095c1f34..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/generated/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-"""
-Contains generated files produced by generateDS.py.
-
-These do the real work of parsing the doxygen xml files but the
-resultant classes are not very friendly to navigate so the rest of the
-doxyxml module processes them further.
-"""
-from __future__ import unicode_literals
diff --git a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/generated/compound.py b/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/generated/compound.py
deleted file mode 100644
index acfa0dd5c6..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/generated/compound.py
+++ /dev/null
@@ -1,505 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Generated Mon Feb 9 19:08:05 2009 by generateDS.py.
-"""
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-
-from xml.dom import minidom
-from xml.dom import Node
-
-import sys
-
-from . import compoundsuper as supermod
-from .compoundsuper import MixedContainer
-
-
-class DoxygenTypeSub(supermod.DoxygenType):
- def __init__(self, version=None, compounddef=None):
- supermod.DoxygenType.__init__(self, version, compounddef)
-
- def find(self, details):
-
- return self.compounddef.find(details)
-
-supermod.DoxygenType.subclass = DoxygenTypeSub
-# end class DoxygenTypeSub
-
-
-class compounddefTypeSub(supermod.compounddefType):
- def __init__(self, kind=None, prot=None, id=None, compoundname='', title='', basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None):
- supermod.compounddefType.__init__(self, kind, prot, id, compoundname, title, basecompoundref, derivedcompoundref, includes, includedby, incdepgraph, invincdepgraph, innerdir, innerfile, innerclass, innernamespace, innerpage, innergroup, templateparamlist, sectiondef, briefdescription, detaileddescription, inheritancegraph, collaborationgraph, programlisting, location, listofallmembers)
-
- def find(self, details):
-
- if self.id == details.refid:
- return self
-
- for sectiondef in self.sectiondef:
- result = sectiondef.find(details)
- if result:
- return result
-
-
-supermod.compounddefType.subclass = compounddefTypeSub
-# end class compounddefTypeSub
-
-
-class listofallmembersTypeSub(supermod.listofallmembersType):
- def __init__(self, member=None):
- supermod.listofallmembersType.__init__(self, member)
-supermod.listofallmembersType.subclass = listofallmembersTypeSub
-# end class listofallmembersTypeSub
-
-
-class memberRefTypeSub(supermod.memberRefType):
- def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope='', name=''):
- supermod.memberRefType.__init__(self, virt, prot, refid, ambiguityscope, scope, name)
-supermod.memberRefType.subclass = memberRefTypeSub
-# end class memberRefTypeSub
-
-
-class compoundRefTypeSub(supermod.compoundRefType):
- def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
- supermod.compoundRefType.__init__(self, mixedclass_, content_)
-supermod.compoundRefType.subclass = compoundRefTypeSub
-# end class compoundRefTypeSub
-
-
-class reimplementTypeSub(supermod.reimplementType):
- def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None):
- supermod.reimplementType.__init__(self, mixedclass_, content_)
-supermod.reimplementType.subclass = reimplementTypeSub
-# end class reimplementTypeSub
-
-
-class incTypeSub(supermod.incType):
- def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
- supermod.incType.__init__(self, mixedclass_, content_)
-supermod.incType.subclass = incTypeSub
-# end class incTypeSub
-
-
-class refTypeSub(supermod.refType):
- def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
- supermod.refType.__init__(self, mixedclass_, content_)
-supermod.refType.subclass = refTypeSub
-# end class refTypeSub
-
-
-
-class refTextTypeSub(supermod.refTextType):
- def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
- supermod.refTextType.__init__(self, mixedclass_, content_)
-
-supermod.refTextType.subclass = refTextTypeSub
-# end class refTextTypeSub
-
-class sectiondefTypeSub(supermod.sectiondefType):
-
-
- def __init__(self, kind=None, header='', description=None, memberdef=None):
- supermod.sectiondefType.__init__(self, kind, header, description, memberdef)
-
- def find(self, details):
-
- for memberdef in self.memberdef:
- if memberdef.id == details.refid:
- return memberdef
-
- return None
-
-
-supermod.sectiondefType.subclass = sectiondefTypeSub
-# end class sectiondefTypeSub
-
-
-class memberdefTypeSub(supermod.memberdefType):
- def __init__(self, initonly=None, kind=None, volatile=None, const=None, raise_=None, virt=None, readable=None, prot=None, explicit=None, new=None, final=None, writable=None, add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition='', argsstring='', name='', read='', write='', bitfield='', reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None):
- supermod.memberdefType.__init__(self, initonly, kind, volatile, const, raise_, virt, readable, prot, explicit, new, final, writable, add, static, remove, sealed, mutable, gettable, inline, settable, id, templateparamlist, type_, definition, argsstring, name, read, write, bitfield, reimplements, reimplementedby, param, enumvalue, initializer, exceptions, briefdescription, detaileddescription, inbodydescription, location, references, referencedby)
-supermod.memberdefType.subclass = memberdefTypeSub
-# end class memberdefTypeSub
-
-
-class descriptionTypeSub(supermod.descriptionType):
- def __init__(self, title='', para=None, sect1=None, internal=None, mixedclass_=None, content_=None):
- supermod.descriptionType.__init__(self, mixedclass_, content_)
-supermod.descriptionType.subclass = descriptionTypeSub
-# end class descriptionTypeSub
-
-
-class enumvalueTypeSub(supermod.enumvalueType):
- def __init__(self, prot=None, id=None, name='', initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None):
- supermod.enumvalueType.__init__(self, mixedclass_, content_)
-supermod.enumvalueType.subclass = enumvalueTypeSub
-# end class enumvalueTypeSub
-
-
-class templateparamlistTypeSub(supermod.templateparamlistType):
- def __init__(self, param=None):
- supermod.templateparamlistType.__init__(self, param)
-supermod.templateparamlistType.subclass = templateparamlistTypeSub
-# end class templateparamlistTypeSub
-
-
-class paramTypeSub(supermod.paramType):
- def __init__(self, type_=None, declname='', defname='', array='', defval=None, briefdescription=None):
- supermod.paramType.__init__(self, type_, declname, defname, array, defval, briefdescription)
-supermod.paramType.subclass = paramTypeSub
-# end class paramTypeSub
-
-
-class linkedTextTypeSub(supermod.linkedTextType):
- def __init__(self, ref=None, mixedclass_=None, content_=None):
- supermod.linkedTextType.__init__(self, mixedclass_, content_)
-supermod.linkedTextType.subclass = linkedTextTypeSub
-# end class linkedTextTypeSub
-
-
-class graphTypeSub(supermod.graphType):
- def __init__(self, node=None):
- supermod.graphType.__init__(self, node)
-supermod.graphType.subclass = graphTypeSub
-# end class graphTypeSub
-
-
-class nodeTypeSub(supermod.nodeType):
- def __init__(self, id=None, label='', link=None, childnode=None):
- supermod.nodeType.__init__(self, id, label, link, childnode)
-supermod.nodeType.subclass = nodeTypeSub
-# end class nodeTypeSub
-
-
-class childnodeTypeSub(supermod.childnodeType):
- def __init__(self, relation=None, refid=None, edgelabel=None):
- supermod.childnodeType.__init__(self, relation, refid, edgelabel)
-supermod.childnodeType.subclass = childnodeTypeSub
-# end class childnodeTypeSub
-
-
-class linkTypeSub(supermod.linkType):
- def __init__(self, refid=None, external=None, valueOf_=''):
- supermod.linkType.__init__(self, refid, external)
-supermod.linkType.subclass = linkTypeSub
-# end class linkTypeSub
-
-
-class listingTypeSub(supermod.listingType):
- def __init__(self, codeline=None):
- supermod.listingType.__init__(self, codeline)
-supermod.listingType.subclass = listingTypeSub
-# end class listingTypeSub
-
-
-class codelineTypeSub(supermod.codelineType):
- def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None):
- supermod.codelineType.__init__(self, external, lineno, refkind, refid, highlight)
-supermod.codelineType.subclass = codelineTypeSub
-# end class codelineTypeSub
-
-
-class highlightTypeSub(supermod.highlightType):
- def __init__(self, class_=None, sp=None, ref=None, mixedclass_=None, content_=None):
- supermod.highlightType.__init__(self, mixedclass_, content_)
-supermod.highlightType.subclass = highlightTypeSub
-# end class highlightTypeSub
-
-
-class referenceTypeSub(supermod.referenceType):
- def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None):
- supermod.referenceType.__init__(self, mixedclass_, content_)
-supermod.referenceType.subclass = referenceTypeSub
-# end class referenceTypeSub
-
-
-class locationTypeSub(supermod.locationType):
- def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None, valueOf_=''):
- supermod.locationType.__init__(self, bodystart, line, bodyend, bodyfile, file)
-supermod.locationType.subclass = locationTypeSub
-# end class locationTypeSub
-
-
-class docSect1TypeSub(supermod.docSect1Type):
- def __init__(self, id=None, title='', para=None, sect2=None, internal=None, mixedclass_=None, content_=None):
- supermod.docSect1Type.__init__(self, mixedclass_, content_)
-supermod.docSect1Type.subclass = docSect1TypeSub
-# end class docSect1TypeSub
-
-
-class docSect2TypeSub(supermod.docSect2Type):
- def __init__(self, id=None, title='', para=None, sect3=None, internal=None, mixedclass_=None, content_=None):
- supermod.docSect2Type.__init__(self, mixedclass_, content_)
-supermod.docSect2Type.subclass = docSect2TypeSub
-# end class docSect2TypeSub
-
-
-class docSect3TypeSub(supermod.docSect3Type):
- def __init__(self, id=None, title='', para=None, sect4=None, internal=None, mixedclass_=None, content_=None):
- supermod.docSect3Type.__init__(self, mixedclass_, content_)
-supermod.docSect3Type.subclass = docSect3TypeSub
-# end class docSect3TypeSub
-
-
-class docSect4TypeSub(supermod.docSect4Type):
- def __init__(self, id=None, title='', para=None, internal=None, mixedclass_=None, content_=None):
- supermod.docSect4Type.__init__(self, mixedclass_, content_)
-supermod.docSect4Type.subclass = docSect4TypeSub
-# end class docSect4TypeSub
-
-
-class docInternalTypeSub(supermod.docInternalType):
- def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None):
- supermod.docInternalType.__init__(self, mixedclass_, content_)
-supermod.docInternalType.subclass = docInternalTypeSub
-# end class docInternalTypeSub
-
-
-class docInternalS1TypeSub(supermod.docInternalS1Type):
- def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None):
- supermod.docInternalS1Type.__init__(self, mixedclass_, content_)
-supermod.docInternalS1Type.subclass = docInternalS1TypeSub
-# end class docInternalS1TypeSub
-
-
-class docInternalS2TypeSub(supermod.docInternalS2Type):
- def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
- supermod.docInternalS2Type.__init__(self, mixedclass_, content_)
-supermod.docInternalS2Type.subclass = docInternalS2TypeSub
-# end class docInternalS2TypeSub
-
-
-class docInternalS3TypeSub(supermod.docInternalS3Type):
- def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
- supermod.docInternalS3Type.__init__(self, mixedclass_, content_)
-supermod.docInternalS3Type.subclass = docInternalS3TypeSub
-# end class docInternalS3TypeSub
-
-
-class docInternalS4TypeSub(supermod.docInternalS4Type):
- def __init__(self, para=None, mixedclass_=None, content_=None):
- supermod.docInternalS4Type.__init__(self, mixedclass_, content_)
-supermod.docInternalS4Type.subclass = docInternalS4TypeSub
-# end class docInternalS4TypeSub
-
-
-class docURLLinkSub(supermod.docURLLink):
- def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None):
- supermod.docURLLink.__init__(self, mixedclass_, content_)
-supermod.docURLLink.subclass = docURLLinkSub
-# end class docURLLinkSub
-
-
-class docAnchorTypeSub(supermod.docAnchorType):
- def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
- supermod.docAnchorType.__init__(self, mixedclass_, content_)
-supermod.docAnchorType.subclass = docAnchorTypeSub
-# end class docAnchorTypeSub
-
-
-class docFormulaTypeSub(supermod.docFormulaType):
- def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
- supermod.docFormulaType.__init__(self, mixedclass_, content_)
-supermod.docFormulaType.subclass = docFormulaTypeSub
-# end class docFormulaTypeSub
-
-
-class docIndexEntryTypeSub(supermod.docIndexEntryType):
- def __init__(self, primaryie='', secondaryie=''):
- supermod.docIndexEntryType.__init__(self, primaryie, secondaryie)
-supermod.docIndexEntryType.subclass = docIndexEntryTypeSub
-# end class docIndexEntryTypeSub
-
-
-class docListTypeSub(supermod.docListType):
- def __init__(self, listitem=None):
- supermod.docListType.__init__(self, listitem)
-supermod.docListType.subclass = docListTypeSub
-# end class docListTypeSub
-
-
-class docListItemTypeSub(supermod.docListItemType):
- def __init__(self, para=None):
- supermod.docListItemType.__init__(self, para)
-supermod.docListItemType.subclass = docListItemTypeSub
-# end class docListItemTypeSub
-
-
-class docSimpleSectTypeSub(supermod.docSimpleSectType):
- def __init__(self, kind=None, title=None, para=None):
- supermod.docSimpleSectType.__init__(self, kind, title, para)
-supermod.docSimpleSectType.subclass = docSimpleSectTypeSub
-# end class docSimpleSectTypeSub
-
-
-class docVarListEntryTypeSub(supermod.docVarListEntryType):
- def __init__(self, term=None):
- supermod.docVarListEntryType.__init__(self, term)
-supermod.docVarListEntryType.subclass = docVarListEntryTypeSub
-# end class docVarListEntryTypeSub
-
-
-class docRefTextTypeSub(supermod.docRefTextType):
- def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
- supermod.docRefTextType.__init__(self, mixedclass_, content_)
-supermod.docRefTextType.subclass = docRefTextTypeSub
-# end class docRefTextTypeSub
-
-
-class docTableTypeSub(supermod.docTableType):
- def __init__(self, rows=None, cols=None, row=None, caption=None):
- supermod.docTableType.__init__(self, rows, cols, row, caption)
-supermod.docTableType.subclass = docTableTypeSub
-# end class docTableTypeSub
-
-
-class docRowTypeSub(supermod.docRowType):
- def __init__(self, entry=None):
- supermod.docRowType.__init__(self, entry)
-supermod.docRowType.subclass = docRowTypeSub
-# end class docRowTypeSub
-
-
-class docEntryTypeSub(supermod.docEntryType):
- def __init__(self, thead=None, para=None):
- supermod.docEntryType.__init__(self, thead, para)
-supermod.docEntryType.subclass = docEntryTypeSub
-# end class docEntryTypeSub
-
-
-class docHeadingTypeSub(supermod.docHeadingType):
- def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None):
- supermod.docHeadingType.__init__(self, mixedclass_, content_)
-supermod.docHeadingType.subclass = docHeadingTypeSub
-# end class docHeadingTypeSub
-
-
-class docImageTypeSub(supermod.docImageType):
- def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None):
- supermod.docImageType.__init__(self, mixedclass_, content_)
-supermod.docImageType.subclass = docImageTypeSub
-# end class docImageTypeSub
-
-
-class docDotFileTypeSub(supermod.docDotFileType):
- def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None):
- supermod.docDotFileType.__init__(self, mixedclass_, content_)
-supermod.docDotFileType.subclass = docDotFileTypeSub
-# end class docDotFileTypeSub
-
-
-class docTocItemTypeSub(supermod.docTocItemType):
- def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
- supermod.docTocItemType.__init__(self, mixedclass_, content_)
-supermod.docTocItemType.subclass = docTocItemTypeSub
-# end class docTocItemTypeSub
-
-
-class docTocListTypeSub(supermod.docTocListType):
- def __init__(self, tocitem=None):
- supermod.docTocListType.__init__(self, tocitem)
-supermod.docTocListType.subclass = docTocListTypeSub
-# end class docTocListTypeSub
-
-
-class docLanguageTypeSub(supermod.docLanguageType):
- def __init__(self, langid=None, para=None):
- supermod.docLanguageType.__init__(self, langid, para)
-supermod.docLanguageType.subclass = docLanguageTypeSub
-# end class docLanguageTypeSub
-
-
-class docParamListTypeSub(supermod.docParamListType):
- def __init__(self, kind=None, parameteritem=None):
- supermod.docParamListType.__init__(self, kind, parameteritem)
-supermod.docParamListType.subclass = docParamListTypeSub
-# end class docParamListTypeSub
-
-
-class docParamListItemSub(supermod.docParamListItem):
- def __init__(self, parameternamelist=None, parameterdescription=None):
- supermod.docParamListItem.__init__(self, parameternamelist, parameterdescription)
-supermod.docParamListItem.subclass = docParamListItemSub
-# end class docParamListItemSub
-
-
-class docParamNameListSub(supermod.docParamNameList):
- def __init__(self, parametername=None):
- supermod.docParamNameList.__init__(self, parametername)
-supermod.docParamNameList.subclass = docParamNameListSub
-# end class docParamNameListSub
-
-
-class docParamNameSub(supermod.docParamName):
- def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None):
- supermod.docParamName.__init__(self, mixedclass_, content_)
-supermod.docParamName.subclass = docParamNameSub
-# end class docParamNameSub
-
-
-class docXRefSectTypeSub(supermod.docXRefSectType):
- def __init__(self, id=None, xreftitle=None, xrefdescription=None):
- supermod.docXRefSectType.__init__(self, id, xreftitle, xrefdescription)
-supermod.docXRefSectType.subclass = docXRefSectTypeSub
-# end class docXRefSectTypeSub
-
-
-class docCopyTypeSub(supermod.docCopyType):
- def __init__(self, link=None, para=None, sect1=None, internal=None):
- supermod.docCopyType.__init__(self, link, para, sect1, internal)
-supermod.docCopyType.subclass = docCopyTypeSub
-# end class docCopyTypeSub
-
-
-class docCharTypeSub(supermod.docCharType):
- def __init__(self, char=None, valueOf_=''):
- supermod.docCharType.__init__(self, char)
-supermod.docCharType.subclass = docCharTypeSub
-# end class docCharTypeSub
-
-class docParaTypeSub(supermod.docParaType):
- def __init__(self, char=None, valueOf_=''):
- supermod.docParaType.__init__(self, char)
-
- self.parameterlist = []
- self.simplesects = []
- self.content = []
-
- def buildChildren(self, child_, nodeName_):
- supermod.docParaType.buildChildren(self, child_, nodeName_)
-
- if child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == "ref":
- obj_ = supermod.docRefTextType.factory()
- obj_.build(child_)
- self.content.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'parameterlist':
- obj_ = supermod.docParamListType.factory()
- obj_.build(child_)
- self.parameterlist.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'simplesect':
- obj_ = supermod.docSimpleSectType.factory()
- obj_.build(child_)
- self.simplesects.append(obj_)
-
-
-supermod.docParaType.subclass = docParaTypeSub
-# end class docParaTypeSub
-
-
-
-def parse(inFilename):
- doc = minidom.parse(inFilename)
- rootNode = doc.documentElement
- rootObj = supermod.DoxygenType.factory()
- rootObj.build(rootNode)
- return rootObj
-
-
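
The compound.py module removed above is normally driven by Base.retrieve_data(), which joins an xml path with a compound refid and calls compound.parse(). Below is a hedged sketch of that call, with an assumed xml directory and refid rather than values taken from this repository.

# Sketch of driving the generated compound parser directly (assumed paths).
import os
from doxyxml.generated import compound   # assumes doxyxml is importable

xml_path = "example/xml/"                 # assumed doxygen XML directory
refid = "classAadvark"                    # assumed compound refid
root = compound.parse(os.path.join(xml_path, refid + ".xml"))

print(root.compounddef.get_compoundname())
for sec in root.compounddef.sectiondef:   # sectiondefs hold the memberdefs
    for mem in sec.memberdef:
        print("  member:", mem.name, "kind:", mem.kind)
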
diff --git a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/generated/compoundsuper.py b/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/generated/compoundsuper.py
deleted file mode 100644
index 6e984e13ec..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/generated/compoundsuper.py
+++ /dev/null
@@ -1,8346 +0,0 @@
-#!/usr/bin/env python
-
-#
-# Generated Thu Jun 11 18:44:25 2009 by generateDS.py.
-#
-
-from __future__ import print_function
-from __future__ import unicode_literals
-
-import sys
-
-from xml.dom import minidom
-from xml.dom import Node
-
-import six
-
-
-#
-# User methods
-#
-# Calls to the methods in these classes are generated by generateDS.py.
-# You can replace these methods by re-implementing the following class
-# in a module named generatedssuper.py.
-
-try:
- from generatedssuper import GeneratedsSuper
-except ImportError as exp:
-
- class GeneratedsSuper(object):
- def format_string(self, input_data, input_name=''):
- return input_data
- def format_integer(self, input_data, input_name=''):
- return '%d' % input_data
- def format_float(self, input_data, input_name=''):
- return '%f' % input_data
- def format_double(self, input_data, input_name=''):
- return '%e' % input_data
- def format_boolean(self, input_data, input_name=''):
- return '%s' % input_data
-
-
-#
-# If you have installed IPython you can uncomment and use the following.
-# IPython is available from http://ipython.scipy.org/.
-#
-
-## from IPython.Shell import IPShellEmbed
-## args = ''
-## ipshell = IPShellEmbed(args,
-## banner = 'Dropping into IPython',
-## exit_msg = 'Leaving Interpreter, back to program.')
-
-# Then use the following line where and when you want to drop into the
-# IPython shell:
-# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
-
-#
-# Globals
-#
-
-ExternalEncoding = 'ascii'
-
-#
-# Support/utility functions.
-#
-
-def showIndent(outfile, level):
- for idx in range(level):
- outfile.write(' ')
-
-def quote_xml(inStr):
- s1 = (isinstance(inStr, six.string_types) and inStr or
- '%s' % inStr)
- s1 = s1.replace('&', '&amp;')
- s1 = s1.replace('<', '&lt;')
- s1 = s1.replace('>', '&gt;')
- return s1
-
-def quote_attrib(inStr):
- s1 = (isinstance(inStr, six.string_types) and inStr or
- '%s' % inStr)
- s1 = s1.replace('&', '&amp;')
- s1 = s1.replace('<', '&lt;')
- s1 = s1.replace('>', '&gt;')
- if '"' in s1:
- if "'" in s1:
- s1 = '"%s"' % s1.replace('"', "&quot;")
- else:
- s1 = "'%s'" % s1
- else:
- s1 = '"%s"' % s1
- return s1
-
-def quote_python(inStr):
- s1 = inStr
- if s1.find("'") == -1:
- if s1.find('\n') == -1:
- return "'%s'" % s1
- else:
- return "'''%s'''" % s1
- else:
- if s1.find('"') != -1:
- s1 = s1.replace('"', '\\"')
- if s1.find('\n') == -1:
- return '"%s"' % s1
- else:
- return '"""%s"""' % s1
-
-
-class MixedContainer(object):
- # Constants for category:
- CategoryNone = 0
- CategoryText = 1
- CategorySimple = 2
- CategoryComplex = 3
- # Constants for content_type:
- TypeNone = 0
- TypeText = 1
- TypeString = 2
- TypeInteger = 3
- TypeFloat = 4
- TypeDecimal = 5
- TypeDouble = 6
- TypeBoolean = 7
- def __init__(self, category, content_type, name, value):
- self.category = category
- self.content_type = content_type
- self.name = name
- self.value = value
- def getCategory(self):
- return self.category
- def getContenttype(self, content_type):
- return self.content_type
- def getValue(self):
- return self.value
- def getName(self):
- return self.name
- def export(self, outfile, level, name, namespace):
- if self.category == MixedContainer.CategoryText:
- outfile.write(self.value)
- elif self.category == MixedContainer.CategorySimple:
- self.exportSimple(outfile, level, name)
- else: # category == MixedContainer.CategoryComplex
- self.value.export(outfile, level, namespace,name)
- def exportSimple(self, outfile, level, name):
- if self.content_type == MixedContainer.TypeString:
- outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
- elif self.content_type == MixedContainer.TypeInteger or \
- self.content_type == MixedContainer.TypeBoolean:
- outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
- elif self.content_type == MixedContainer.TypeFloat or \
- self.content_type == MixedContainer.TypeDecimal:
- outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
- elif self.content_type == MixedContainer.TypeDouble:
- outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
- def exportLiteral(self, outfile, level, name):
- if self.category == MixedContainer.CategoryText:
- showIndent(outfile, level)
- outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
- (self.category, self.content_type, self.name, self.value))
- elif self.category == MixedContainer.CategorySimple:
- showIndent(outfile, level)
- outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
- (self.category, self.content_type, self.name, self.value))
- else: # category == MixedContainer.CategoryComplex
- showIndent(outfile, level)
- outfile.write('MixedContainer(%d, %d, "%s",\n' % \
- (self.category, self.content_type, self.name,))
- self.value.exportLiteral(outfile, level + 1)
- showIndent(outfile, level)
- outfile.write(')\n')
-
-
-class _MemberSpec(object):
- def __init__(self, name='', data_type='', container=0):
- self.name = name
- self.data_type = data_type
- self.container = container
- def set_name(self, name): self.name = name
- def get_name(self): return self.name
- def set_data_type(self, data_type): self.data_type = data_type
- def get_data_type(self): return self.data_type
- def set_container(self, container): self.container = container
- def get_container(self): return self.container
-
-
-#
-# Data representation classes.
-#
-
-class DoxygenType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, version=None, compounddef=None):
- self.version = version
- self.compounddef = compounddef
- def factory(*args_, **kwargs_):
- if DoxygenType.subclass:
- return DoxygenType.subclass(*args_, **kwargs_)
- else:
- return DoxygenType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_compounddef(self): return self.compounddef
- def set_compounddef(self, compounddef): self.compounddef = compounddef
- def get_version(self): return self.version
- def set_version(self, version): self.version = version
- def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='DoxygenType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='DoxygenType'):
- outfile.write(' version=%s' % (quote_attrib(self.version), ))
- def exportChildren(self, outfile, level, namespace_='', name_='DoxygenType'):
- if self.compounddef:
- self.compounddef.export(outfile, level, namespace_, name_='compounddef')
- def hasContent_(self):
- if (
- self.compounddef is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='DoxygenType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.version is not None:
- showIndent(outfile, level)
- outfile.write('version = "%s",\n' % (self.version,))
- def exportLiteralChildren(self, outfile, level, name_):
- if self.compounddef:
- showIndent(outfile, level)
- outfile.write('compounddef=model_.compounddefType(\n')
- self.compounddef.exportLiteral(outfile, level, name_='compounddef')
- showIndent(outfile, level)
- outfile.write('),\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('version'):
- self.version = attrs.get('version').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'compounddef':
- obj_ = compounddefType.factory()
- obj_.build(child_)
- self.set_compounddef(obj_)
-# end class DoxygenType
-
-
-class compounddefType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, kind=None, prot=None, id=None, compoundname=None, title=None, basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None):
- self.kind = kind
- self.prot = prot
- self.id = id
- self.compoundname = compoundname
- self.title = title
- if basecompoundref is None:
- self.basecompoundref = []
- else:
- self.basecompoundref = basecompoundref
- if derivedcompoundref is None:
- self.derivedcompoundref = []
- else:
- self.derivedcompoundref = derivedcompoundref
- if includes is None:
- self.includes = []
- else:
- self.includes = includes
- if includedby is None:
- self.includedby = []
- else:
- self.includedby = includedby
- self.incdepgraph = incdepgraph
- self.invincdepgraph = invincdepgraph
- if innerdir is None:
- self.innerdir = []
- else:
- self.innerdir = innerdir
- if innerfile is None:
- self.innerfile = []
- else:
- self.innerfile = innerfile
- if innerclass is None:
- self.innerclass = []
- else:
- self.innerclass = innerclass
- if innernamespace is None:
- self.innernamespace = []
- else:
- self.innernamespace = innernamespace
- if innerpage is None:
- self.innerpage = []
- else:
- self.innerpage = innerpage
- if innergroup is None:
- self.innergroup = []
- else:
- self.innergroup = innergroup
- self.templateparamlist = templateparamlist
- if sectiondef is None:
- self.sectiondef = []
- else:
- self.sectiondef = sectiondef
- self.briefdescription = briefdescription
- self.detaileddescription = detaileddescription
- self.inheritancegraph = inheritancegraph
- self.collaborationgraph = collaborationgraph
- self.programlisting = programlisting
- self.location = location
- self.listofallmembers = listofallmembers
- def factory(*args_, **kwargs_):
- if compounddefType.subclass:
- return compounddefType.subclass(*args_, **kwargs_)
- else:
- return compounddefType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_compoundname(self): return self.compoundname
- def set_compoundname(self, compoundname): self.compoundname = compoundname
- def get_title(self): return self.title
- def set_title(self, title): self.title = title
- def get_basecompoundref(self): return self.basecompoundref
- def set_basecompoundref(self, basecompoundref): self.basecompoundref = basecompoundref
- def add_basecompoundref(self, value): self.basecompoundref.append(value)
- def insert_basecompoundref(self, index, value): self.basecompoundref[index] = value
- def get_derivedcompoundref(self): return self.derivedcompoundref
- def set_derivedcompoundref(self, derivedcompoundref): self.derivedcompoundref = derivedcompoundref
- def add_derivedcompoundref(self, value): self.derivedcompoundref.append(value)
- def insert_derivedcompoundref(self, index, value): self.derivedcompoundref[index] = value
- def get_includes(self): return self.includes
- def set_includes(self, includes): self.includes = includes
- def add_includes(self, value): self.includes.append(value)
- def insert_includes(self, index, value): self.includes[index] = value
- def get_includedby(self): return self.includedby
- def set_includedby(self, includedby): self.includedby = includedby
- def add_includedby(self, value): self.includedby.append(value)
- def insert_includedby(self, index, value): self.includedby[index] = value
- def get_incdepgraph(self): return self.incdepgraph
- def set_incdepgraph(self, incdepgraph): self.incdepgraph = incdepgraph
- def get_invincdepgraph(self): return self.invincdepgraph
- def set_invincdepgraph(self, invincdepgraph): self.invincdepgraph = invincdepgraph
- def get_innerdir(self): return self.innerdir
- def set_innerdir(self, innerdir): self.innerdir = innerdir
- def add_innerdir(self, value): self.innerdir.append(value)
- def insert_innerdir(self, index, value): self.innerdir[index] = value
- def get_innerfile(self): return self.innerfile
- def set_innerfile(self, innerfile): self.innerfile = innerfile
- def add_innerfile(self, value): self.innerfile.append(value)
- def insert_innerfile(self, index, value): self.innerfile[index] = value
- def get_innerclass(self): return self.innerclass
- def set_innerclass(self, innerclass): self.innerclass = innerclass
- def add_innerclass(self, value): self.innerclass.append(value)
- def insert_innerclass(self, index, value): self.innerclass[index] = value
- def get_innernamespace(self): return self.innernamespace
- def set_innernamespace(self, innernamespace): self.innernamespace = innernamespace
- def add_innernamespace(self, value): self.innernamespace.append(value)
- def insert_innernamespace(self, index, value): self.innernamespace[index] = value
- def get_innerpage(self): return self.innerpage
- def set_innerpage(self, innerpage): self.innerpage = innerpage
- def add_innerpage(self, value): self.innerpage.append(value)
- def insert_innerpage(self, index, value): self.innerpage[index] = value
- def get_innergroup(self): return self.innergroup
- def set_innergroup(self, innergroup): self.innergroup = innergroup
- def add_innergroup(self, value): self.innergroup.append(value)
- def insert_innergroup(self, index, value): self.innergroup[index] = value
- def get_templateparamlist(self): return self.templateparamlist
- def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist
- def get_sectiondef(self): return self.sectiondef
- def set_sectiondef(self, sectiondef): self.sectiondef = sectiondef
- def add_sectiondef(self, value): self.sectiondef.append(value)
- def insert_sectiondef(self, index, value): self.sectiondef[index] = value
- def get_briefdescription(self): return self.briefdescription
- def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription
- def get_detaileddescription(self): return self.detaileddescription
- def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription
- def get_inheritancegraph(self): return self.inheritancegraph
- def set_inheritancegraph(self, inheritancegraph): self.inheritancegraph = inheritancegraph
- def get_collaborationgraph(self): return self.collaborationgraph
- def set_collaborationgraph(self, collaborationgraph): self.collaborationgraph = collaborationgraph
- def get_programlisting(self): return self.programlisting
- def set_programlisting(self, programlisting): self.programlisting = programlisting
- def get_location(self): return self.location
- def set_location(self, location): self.location = location
- def get_listofallmembers(self): return self.listofallmembers
- def set_listofallmembers(self, listofallmembers): self.listofallmembers = listofallmembers
- def get_kind(self): return self.kind
- def set_kind(self, kind): self.kind = kind
- def get_prot(self): return self.prot
- def set_prot(self, prot): self.prot = prot
- def get_id(self): return self.id
- def set_id(self, id): self.id = id
- def export(self, outfile, level, namespace_='', name_='compounddefType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='compounddefType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='compounddefType'):
- if self.kind is not None:
- outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
- if self.prot is not None:
- outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
- if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='compounddefType'):
- if self.compoundname is not None:
- showIndent(outfile, level)
- outfile.write('<%scompoundname>%s</%scompoundname>\n' % (namespace_, self.format_string(quote_xml(self.compoundname).encode(ExternalEncoding), input_name='compoundname'), namespace_))
- if self.title is not None:
- showIndent(outfile, level)
- outfile.write('<%stitle>%s</%stitle>\n' % (namespace_, self.format_string(quote_xml(self.title).encode(ExternalEncoding), input_name='title'), namespace_))
- for basecompoundref_ in self.basecompoundref:
- basecompoundref_.export(outfile, level, namespace_, name_='basecompoundref')
- for derivedcompoundref_ in self.derivedcompoundref:
- derivedcompoundref_.export(outfile, level, namespace_, name_='derivedcompoundref')
- for includes_ in self.includes:
- includes_.export(outfile, level, namespace_, name_='includes')
- for includedby_ in self.includedby:
- includedby_.export(outfile, level, namespace_, name_='includedby')
- if self.incdepgraph:
- self.incdepgraph.export(outfile, level, namespace_, name_='incdepgraph')
- if self.invincdepgraph:
- self.invincdepgraph.export(outfile, level, namespace_, name_='invincdepgraph')
- for innerdir_ in self.innerdir:
- innerdir_.export(outfile, level, namespace_, name_='innerdir')
- for innerfile_ in self.innerfile:
- innerfile_.export(outfile, level, namespace_, name_='innerfile')
- for innerclass_ in self.innerclass:
- innerclass_.export(outfile, level, namespace_, name_='innerclass')
- for innernamespace_ in self.innernamespace:
- innernamespace_.export(outfile, level, namespace_, name_='innernamespace')
- for innerpage_ in self.innerpage:
- innerpage_.export(outfile, level, namespace_, name_='innerpage')
- for innergroup_ in self.innergroup:
- innergroup_.export(outfile, level, namespace_, name_='innergroup')
- if self.templateparamlist:
- self.templateparamlist.export(outfile, level, namespace_, name_='templateparamlist')
- for sectiondef_ in self.sectiondef:
- sectiondef_.export(outfile, level, namespace_, name_='sectiondef')
- if self.briefdescription:
- self.briefdescription.export(outfile, level, namespace_, name_='briefdescription')
- if self.detaileddescription:
- self.detaileddescription.export(outfile, level, namespace_, name_='detaileddescription')
- if self.inheritancegraph:
- self.inheritancegraph.export(outfile, level, namespace_, name_='inheritancegraph')
- if self.collaborationgraph:
- self.collaborationgraph.export(outfile, level, namespace_, name_='collaborationgraph')
- if self.programlisting:
- self.programlisting.export(outfile, level, namespace_, name_='programlisting')
- if self.location:
- self.location.export(outfile, level, namespace_, name_='location')
- if self.listofallmembers:
- self.listofallmembers.export(outfile, level, namespace_, name_='listofallmembers')
- def hasContent_(self):
- if (
- self.compoundname is not None or
- self.title is not None or
- self.basecompoundref is not None or
- self.derivedcompoundref is not None or
- self.includes is not None or
- self.includedby is not None or
- self.incdepgraph is not None or
- self.invincdepgraph is not None or
- self.innerdir is not None or
- self.innerfile is not None or
- self.innerclass is not None or
- self.innernamespace is not None or
- self.innerpage is not None or
- self.innergroup is not None or
- self.templateparamlist is not None or
- self.sectiondef is not None or
- self.briefdescription is not None or
- self.detaileddescription is not None or
- self.inheritancegraph is not None or
- self.collaborationgraph is not None or
- self.programlisting is not None or
- self.location is not None or
- self.listofallmembers is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='compounddefType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.kind is not None:
- showIndent(outfile, level)
- outfile.write('kind = "%s",\n' % (self.kind,))
- if self.prot is not None:
- showIndent(outfile, level)
- outfile.write('prot = "%s",\n' % (self.prot,))
- if self.id is not None:
- showIndent(outfile, level)
- outfile.write('id = %s,\n' % (self.id,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('compoundname=%s,\n' % quote_python(self.compoundname).encode(ExternalEncoding))
- if self.title:
- showIndent(outfile, level)
- outfile.write('title=model_.xsd_string(\n')
- self.title.exportLiteral(outfile, level, name_='title')
- showIndent(outfile, level)
- outfile.write('),\n')
- showIndent(outfile, level)
- outfile.write('basecompoundref=[\n')
- level += 1
- for basecompoundref in self.basecompoundref:
- showIndent(outfile, level)
- outfile.write('model_.basecompoundref(\n')
- basecompoundref.exportLiteral(outfile, level, name_='basecompoundref')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('derivedcompoundref=[\n')
- level += 1
- for derivedcompoundref in self.derivedcompoundref:
- showIndent(outfile, level)
- outfile.write('model_.derivedcompoundref(\n')
- derivedcompoundref.exportLiteral(outfile, level, name_='derivedcompoundref')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('includes=[\n')
- level += 1
- for includes in self.includes:
- showIndent(outfile, level)
- outfile.write('model_.includes(\n')
- includes.exportLiteral(outfile, level, name_='includes')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('includedby=[\n')
- level += 1
- for includedby in self.includedby:
- showIndent(outfile, level)
- outfile.write('model_.includedby(\n')
- includedby.exportLiteral(outfile, level, name_='includedby')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- if self.incdepgraph:
- showIndent(outfile, level)
- outfile.write('incdepgraph=model_.graphType(\n')
- self.incdepgraph.exportLiteral(outfile, level, name_='incdepgraph')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.invincdepgraph:
- showIndent(outfile, level)
- outfile.write('invincdepgraph=model_.graphType(\n')
- self.invincdepgraph.exportLiteral(outfile, level, name_='invincdepgraph')
- showIndent(outfile, level)
- outfile.write('),\n')
- showIndent(outfile, level)
- outfile.write('innerdir=[\n')
- level += 1
- for innerdir in self.innerdir:
- showIndent(outfile, level)
- outfile.write('model_.innerdir(\n')
- innerdir.exportLiteral(outfile, level, name_='innerdir')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('innerfile=[\n')
- level += 1
- for innerfile in self.innerfile:
- showIndent(outfile, level)
- outfile.write('model_.innerfile(\n')
- innerfile.exportLiteral(outfile, level, name_='innerfile')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('innerclass=[\n')
- level += 1
- for innerclass in self.innerclass:
- showIndent(outfile, level)
- outfile.write('model_.innerclass(\n')
- innerclass.exportLiteral(outfile, level, name_='innerclass')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('innernamespace=[\n')
- level += 1
- for innernamespace in self.innernamespace:
- showIndent(outfile, level)
- outfile.write('model_.innernamespace(\n')
- innernamespace.exportLiteral(outfile, level, name_='innernamespace')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('innerpage=[\n')
- level += 1
- for innerpage in self.innerpage:
- showIndent(outfile, level)
- outfile.write('model_.innerpage(\n')
- innerpage.exportLiteral(outfile, level, name_='innerpage')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('innergroup=[\n')
- level += 1
- for innergroup in self.innergroup:
- showIndent(outfile, level)
- outfile.write('model_.innergroup(\n')
- innergroup.exportLiteral(outfile, level, name_='innergroup')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- if self.templateparamlist:
- showIndent(outfile, level)
- outfile.write('templateparamlist=model_.templateparamlistType(\n')
- self.templateparamlist.exportLiteral(outfile, level, name_='templateparamlist')
- showIndent(outfile, level)
- outfile.write('),\n')
- showIndent(outfile, level)
- outfile.write('sectiondef=[\n')
- level += 1
- for sectiondef in self.sectiondef:
- showIndent(outfile, level)
- outfile.write('model_.sectiondef(\n')
- sectiondef.exportLiteral(outfile, level, name_='sectiondef')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- if self.briefdescription:
- showIndent(outfile, level)
- outfile.write('briefdescription=model_.descriptionType(\n')
- self.briefdescription.exportLiteral(outfile, level, name_='briefdescription')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.detaileddescription:
- showIndent(outfile, level)
- outfile.write('detaileddescription=model_.descriptionType(\n')
- self.detaileddescription.exportLiteral(outfile, level, name_='detaileddescription')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.inheritancegraph:
- showIndent(outfile, level)
- outfile.write('inheritancegraph=model_.graphType(\n')
- self.inheritancegraph.exportLiteral(outfile, level, name_='inheritancegraph')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.collaborationgraph:
- showIndent(outfile, level)
- outfile.write('collaborationgraph=model_.graphType(\n')
- self.collaborationgraph.exportLiteral(outfile, level, name_='collaborationgraph')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.programlisting:
- showIndent(outfile, level)
- outfile.write('programlisting=model_.listingType(\n')
- self.programlisting.exportLiteral(outfile, level, name_='programlisting')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.location:
- showIndent(outfile, level)
- outfile.write('location=model_.locationType(\n')
- self.location.exportLiteral(outfile, level, name_='location')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.listofallmembers:
- showIndent(outfile, level)
- outfile.write('listofallmembers=model_.listofallmembersType(\n')
- self.listofallmembers.exportLiteral(outfile, level, name_='listofallmembers')
- showIndent(outfile, level)
- outfile.write('),\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('kind'):
- self.kind = attrs.get('kind').value
- if attrs.get('prot'):
- self.prot = attrs.get('prot').value
- if attrs.get('id'):
- self.id = attrs.get('id').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'compoundname':
- compoundname_ = ''
- for text__content_ in child_.childNodes:
- compoundname_ += text__content_.nodeValue
- self.compoundname = compoundname_
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'title':
- obj_ = docTitleType.factory()
- obj_.build(child_)
- self.set_title(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'basecompoundref':
- obj_ = compoundRefType.factory()
- obj_.build(child_)
- self.basecompoundref.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'derivedcompoundref':
- obj_ = compoundRefType.factory()
- obj_.build(child_)
- self.derivedcompoundref.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'includes':
- obj_ = incType.factory()
- obj_.build(child_)
- self.includes.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'includedby':
- obj_ = incType.factory()
- obj_.build(child_)
- self.includedby.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'incdepgraph':
- obj_ = graphType.factory()
- obj_.build(child_)
- self.set_incdepgraph(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'invincdepgraph':
- obj_ = graphType.factory()
- obj_.build(child_)
- self.set_invincdepgraph(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'innerdir':
- obj_ = refType.factory()
- obj_.build(child_)
- self.innerdir.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'innerfile':
- obj_ = refType.factory()
- obj_.build(child_)
- self.innerfile.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'innerclass':
- obj_ = refType.factory()
- obj_.build(child_)
- self.innerclass.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'innernamespace':
- obj_ = refType.factory()
- obj_.build(child_)
- self.innernamespace.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'innerpage':
- obj_ = refType.factory()
- obj_.build(child_)
- self.innerpage.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'innergroup':
- obj_ = refType.factory()
- obj_.build(child_)
- self.innergroup.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'templateparamlist':
- obj_ = templateparamlistType.factory()
- obj_.build(child_)
- self.set_templateparamlist(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sectiondef':
- obj_ = sectiondefType.factory()
- obj_.build(child_)
- self.sectiondef.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'briefdescription':
- obj_ = descriptionType.factory()
- obj_.build(child_)
- self.set_briefdescription(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'detaileddescription':
- obj_ = descriptionType.factory()
- obj_.build(child_)
- self.set_detaileddescription(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'inheritancegraph':
- obj_ = graphType.factory()
- obj_.build(child_)
- self.set_inheritancegraph(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'collaborationgraph':
- obj_ = graphType.factory()
- obj_.build(child_)
- self.set_collaborationgraph(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'programlisting':
- obj_ = listingType.factory()
- obj_.build(child_)
- self.set_programlisting(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'location':
- obj_ = locationType.factory()
- obj_.build(child_)
- self.set_location(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'listofallmembers':
- obj_ = listofallmembersType.factory()
- obj_.build(child_)
- self.set_listofallmembers(obj_)
-# end class compounddefType
-
-
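For orientation, a minimal usage sketch of the generated compounddefType defined above, assuming a Doxygen compound XML file parsed with xml.dom.minidom; the file name is illustrative and the generated classes are taken to be in scope (for example, when run inside this module):

    from xml.dom import minidom

    # Parse a Doxygen "compound" XML file (illustrative path) and locate the
    # <compounddef> element.
    doc = minidom.parse('classexample.xml')
    compounddef_node = doc.getElementsByTagName('compounddef')[0]

    # factory()/build() are the generated entry points: build() reads the DOM
    # node's attributes (kind, prot, id) and child elements (compoundname,
    # sectiondef, ...) exactly as handled in buildAttributes/buildChildren above.
    cdef = compounddefType.factory()
    cdef.build(compounddef_node)
    print(cdef.compoundname)   # documented entity's name
    print(cdef.kind)           # e.g. 'class'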
-class listofallmembersType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, member=None):
- if member is None:
- self.member = []
- else:
- self.member = member
- def factory(*args_, **kwargs_):
- if listofallmembersType.subclass:
- return listofallmembersType.subclass(*args_, **kwargs_)
- else:
- return listofallmembersType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_member(self): return self.member
- def set_member(self, member): self.member = member
- def add_member(self, value): self.member.append(value)
- def insert_member(self, index, value): self.member[index] = value
- def export(self, outfile, level, namespace_='', name_='listofallmembersType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='listofallmembersType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='listofallmembersType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='listofallmembersType'):
- for member_ in self.member:
- member_.export(outfile, level, namespace_, name_='member')
- def hasContent_(self):
- if (
- self.member is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='listofallmembersType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('member=[\n')
- level += 1
- for member in self.member:
- showIndent(outfile, level)
- outfile.write('model_.member(\n')
- member.exportLiteral(outfile, level, name_='member')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'member':
- obj_ = memberRefType.factory()
- obj_.build(child_)
- self.member.append(obj_)
-# end class listofallmembersType
-
-
-class memberRefType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope=None, name=None):
- self.virt = virt
- self.prot = prot
- self.refid = refid
- self.ambiguityscope = ambiguityscope
- self.scope = scope
- self.name = name
- def factory(*args_, **kwargs_):
- if memberRefType.subclass:
- return memberRefType.subclass(*args_, **kwargs_)
- else:
- return memberRefType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_scope(self): return self.scope
- def set_scope(self, scope): self.scope = scope
- def get_name(self): return self.name
- def set_name(self, name): self.name = name
- def get_virt(self): return self.virt
- def set_virt(self, virt): self.virt = virt
- def get_prot(self): return self.prot
- def set_prot(self, prot): self.prot = prot
- def get_refid(self): return self.refid
- def set_refid(self, refid): self.refid = refid
- def get_ambiguityscope(self): return self.ambiguityscope
- def set_ambiguityscope(self, ambiguityscope): self.ambiguityscope = ambiguityscope
- def export(self, outfile, level, namespace_='', name_='memberRefType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='memberRefType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='memberRefType'):
- if self.virt is not None:
- outfile.write(' virt=%s' % (quote_attrib(self.virt), ))
- if self.prot is not None:
- outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
- if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
- if self.ambiguityscope is not None:
- outfile.write(' ambiguityscope=%s' % (self.format_string(quote_attrib(self.ambiguityscope).encode(ExternalEncoding), input_name='ambiguityscope'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='memberRefType'):
- if self.scope is not None:
- showIndent(outfile, level)
- outfile.write('<%sscope>%s</%sscope>\n' % (namespace_, self.format_string(quote_xml(self.scope).encode(ExternalEncoding), input_name='scope'), namespace_))
- if self.name is not None:
- showIndent(outfile, level)
- outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
- def hasContent_(self):
- if (
- self.scope is not None or
- self.name is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='memberRefType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.virt is not None:
- showIndent(outfile, level)
- outfile.write('virt = "%s",\n' % (self.virt,))
- if self.prot is not None:
- showIndent(outfile, level)
- outfile.write('prot = "%s",\n' % (self.prot,))
- if self.refid is not None:
- showIndent(outfile, level)
- outfile.write('refid = %s,\n' % (self.refid,))
- if self.ambiguityscope is not None:
- showIndent(outfile, level)
- outfile.write('ambiguityscope = %s,\n' % (self.ambiguityscope,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('scope=%s,\n' % quote_python(self.scope).encode(ExternalEncoding))
- showIndent(outfile, level)
- outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('virt'):
- self.virt = attrs.get('virt').value
- if attrs.get('prot'):
- self.prot = attrs.get('prot').value
- if attrs.get('refid'):
- self.refid = attrs.get('refid').value
- if attrs.get('ambiguityscope'):
- self.ambiguityscope = attrs.get('ambiguityscope').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'scope':
- scope_ = ''
- for text__content_ in child_.childNodes:
- scope_ += text__content_.nodeValue
- self.scope = scope_
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'name':
- name_ = ''
- for text__content_ in child_.childNodes:
- name_ += text__content_.nodeValue
- self.name = name_
-# end class memberRefType
-
-
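In the other direction, a small sketch of serialising one of these generated objects back to XML through its export() method, assuming sys.stdout (or any object with a write() method) as the output stream:

    import sys

    # Construct a memberRefType by hand and write it out as a <member> element.
    # Attributes left as None are simply omitted by exportAttributes().
    ref = memberRefType(prot='public', virt='non-virtual',
                        scope='foo', name='bar')
    ref.export(sys.stdout, 0, name_='member')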
-class scope(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, valueOf_=''):
- self.valueOf_ = valueOf_
- def factory(*args_, **kwargs_):
- if scope.subclass:
- return scope.subclass(*args_, **kwargs_)
- else:
- return scope(*args_, **kwargs_)
- factory = staticmethod(factory)
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='scope', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='scope')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='scope'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='scope'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='scope'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class scope
-
-
-class name(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, valueOf_=''):
- self.valueOf_ = valueOf_
- def factory(*args_, **kwargs_):
- if name.subclass:
- return name.subclass(*args_, **kwargs_)
- else:
- return name(*args_, **kwargs_)
- factory = staticmethod(factory)
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='name', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='name')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='name'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='name'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='name'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class name
-
-
-class compoundRefType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
- self.virt = virt
- self.prot = prot
- self.refid = refid
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if compoundRefType.subclass:
- return compoundRefType.subclass(*args_, **kwargs_)
- else:
- return compoundRefType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_virt(self): return self.virt
- def set_virt(self, virt): self.virt = virt
- def get_prot(self): return self.prot
- def set_prot(self, prot): self.prot = prot
- def get_refid(self): return self.refid
- def set_refid(self, refid): self.refid = refid
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='compoundRefType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='compoundRefType')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='compoundRefType'):
- if self.virt is not None:
- outfile.write(' virt=%s' % (quote_attrib(self.virt), ))
- if self.prot is not None:
- outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
- if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='compoundRefType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='compoundRefType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.virt is not None:
- showIndent(outfile, level)
- outfile.write('virt = "%s",\n' % (self.virt,))
- if self.prot is not None:
- showIndent(outfile, level)
- outfile.write('prot = "%s",\n' % (self.prot,))
- if self.refid is not None:
- showIndent(outfile, level)
- outfile.write('refid = %s,\n' % (self.refid,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('virt'):
- self.virt = attrs.get('virt').value
- if attrs.get('prot'):
- self.prot = attrs.get('prot').value
- if attrs.get('refid'):
- self.refid = attrs.get('refid').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class compoundRefType
-
-
-class reimplementType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None):
- self.refid = refid
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if reimplementType.subclass:
- return reimplementType.subclass(*args_, **kwargs_)
- else:
- return reimplementType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_refid(self): return self.refid
- def set_refid(self, refid): self.refid = refid
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='reimplementType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='reimplementType')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='reimplementType'):
- if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='reimplementType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='reimplementType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.refid is not None:
- showIndent(outfile, level)
- outfile.write('refid = %s,\n' % (self.refid,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('refid'):
- self.refid = attrs.get('refid').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class reimplementType
-
-
-class incType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
- self.local = local
- self.refid = refid
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if incType.subclass:
- return incType.subclass(*args_, **kwargs_)
- else:
- return incType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_local(self): return self.local
- def set_local(self, local): self.local = local
- def get_refid(self): return self.refid
- def set_refid(self, refid): self.refid = refid
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='incType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='incType')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='incType'):
- if self.local is not None:
- outfile.write(' local=%s' % (quote_attrib(self.local), ))
- if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='incType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='incType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.local is not None:
- showIndent(outfile, level)
- outfile.write('local = "%s",\n' % (self.local,))
- if self.refid is not None:
- showIndent(outfile, level)
- outfile.write('refid = %s,\n' % (self.refid,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('local'):
- self.local = attrs.get('local').value
- if attrs.get('refid'):
- self.refid = attrs.get('refid').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class incType
-
-
-class refType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
- self.prot = prot
- self.refid = refid
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if refType.subclass:
- return refType.subclass(*args_, **kwargs_)
- else:
- return refType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_prot(self): return self.prot
- def set_prot(self, prot): self.prot = prot
- def get_refid(self): return self.refid
- def set_refid(self, refid): self.refid = refid
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='refType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='refType')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='refType'):
- if self.prot is not None:
- outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
- if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='refType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='refType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.prot is not None:
- showIndent(outfile, level)
- outfile.write('prot = "%s",\n' % (self.prot,))
- if self.refid is not None:
- showIndent(outfile, level)
- outfile.write('refid = %s,\n' % (self.refid,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('prot'):
- self.prot = attrs.get('prot').value
- if attrs.get('refid'):
- self.refid = attrs.get('refid').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class refType
-
-
-class refTextType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
- self.refid = refid
- self.kindref = kindref
- self.external = external
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if refTextType.subclass:
- return refTextType.subclass(*args_, **kwargs_)
- else:
- return refTextType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_refid(self): return self.refid
- def set_refid(self, refid): self.refid = refid
- def get_kindref(self): return self.kindref
- def set_kindref(self, kindref): self.kindref = kindref
- def get_external(self): return self.external
- def set_external(self, external): self.external = external
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='refTextType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='refTextType')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='refTextType'):
- if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
- if self.kindref is not None:
- outfile.write(' kindref=%s' % (quote_attrib(self.kindref), ))
- if self.external is not None:
- outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='refTextType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='refTextType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.refid is not None:
- showIndent(outfile, level)
- outfile.write('refid = %s,\n' % (self.refid,))
- if self.kindref is not None:
- showIndent(outfile, level)
- outfile.write('kindref = "%s",\n' % (self.kindref,))
- if self.external is not None:
- showIndent(outfile, level)
- outfile.write('external = %s,\n' % (self.external,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('refid'):
- self.refid = attrs.get('refid').value
- if attrs.get('kindref'):
- self.kindref = attrs.get('kindref').value
- if attrs.get('external'):
- self.external = attrs.get('external').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class refTextType
-
-
-class sectiondefType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, kind=None, header=None, description=None, memberdef=None):
- self.kind = kind
- self.header = header
- self.description = description
- if memberdef is None:
- self.memberdef = []
- else:
- self.memberdef = memberdef
- def factory(*args_, **kwargs_):
- if sectiondefType.subclass:
- return sectiondefType.subclass(*args_, **kwargs_)
- else:
- return sectiondefType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_header(self): return self.header
- def set_header(self, header): self.header = header
- def get_description(self): return self.description
- def set_description(self, description): self.description = description
- def get_memberdef(self): return self.memberdef
- def set_memberdef(self, memberdef): self.memberdef = memberdef
- def add_memberdef(self, value): self.memberdef.append(value)
- def insert_memberdef(self, index, value): self.memberdef[index] = value
- def get_kind(self): return self.kind
- def set_kind(self, kind): self.kind = kind
- def export(self, outfile, level, namespace_='', name_='sectiondefType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='sectiondefType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='sectiondefType'):
- if self.kind is not None:
- outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
- def exportChildren(self, outfile, level, namespace_='', name_='sectiondefType'):
- if self.header is not None:
- showIndent(outfile, level)
- outfile.write('<%sheader>%s</%sheader>\n' % (namespace_, self.format_string(quote_xml(self.header).encode(ExternalEncoding), input_name='header'), namespace_))
- if self.description:
- self.description.export(outfile, level, namespace_, name_='description')
- for memberdef_ in self.memberdef:
- memberdef_.export(outfile, level, namespace_, name_='memberdef')
- def hasContent_(self):
- if (
- self.header is not None or
- self.description is not None or
- self.memberdef is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='sectiondefType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.kind is not None:
- showIndent(outfile, level)
- outfile.write('kind = "%s",\n' % (self.kind,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('header=%s,\n' % quote_python(self.header).encode(ExternalEncoding))
- if self.description:
- showIndent(outfile, level)
- outfile.write('description=model_.descriptionType(\n')
- self.description.exportLiteral(outfile, level, name_='description')
- showIndent(outfile, level)
- outfile.write('),\n')
- showIndent(outfile, level)
- outfile.write('memberdef=[\n')
- level += 1
- for memberdef in self.memberdef:
- showIndent(outfile, level)
- outfile.write('model_.memberdef(\n')
- memberdef.exportLiteral(outfile, level, name_='memberdef')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('kind'):
- self.kind = attrs.get('kind').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'header':
- header_ = ''
- for text__content_ in child_.childNodes:
- header_ += text__content_.nodeValue
- self.header = header_
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'description':
- obj_ = descriptionType.factory()
- obj_.build(child_)
- self.set_description(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'memberdef':
- obj_ = memberdefType.factory()
- obj_.build(child_)
- self.memberdef.append(obj_)
-# end class sectiondefType
-
-
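Continuing the earlier sketch, the populated object tree can then be walked with the generated accessors; the section kinds and member names printed here come straight from the parsed Doxygen XML:

    # Walk the member definitions grouped under each <sectiondef> of the
    # parsed compound (cdef as built in the sketch further up).
    for section in cdef.sectiondef:
        print(section.get_kind())            # e.g. 'public-func'
        for member in section.get_memberdef():
            print('  %s' % member.get_name())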
-class memberdefType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, initonly=None, kind=None, volatile=None, const=None, raisexx=None, virt=None, readable=None, prot=None, explicit=None, new=None, final=None, writable=None, add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition=None, argsstring=None, name=None, read=None, write=None, bitfield=None, reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None):
- self.initonly = initonly
- self.kind = kind
- self.volatile = volatile
- self.const = const
- self.raisexx = raisexx
- self.virt = virt
- self.readable = readable
- self.prot = prot
- self.explicit = explicit
- self.new = new
- self.final = final
- self.writable = writable
- self.add = add
- self.static = static
- self.remove = remove
- self.sealed = sealed
- self.mutable = mutable
- self.gettable = gettable
- self.inline = inline
- self.settable = settable
- self.id = id
- self.templateparamlist = templateparamlist
- self.type_ = type_
- self.definition = definition
- self.argsstring = argsstring
- self.name = name
- self.read = read
- self.write = write
- self.bitfield = bitfield
- if reimplements is None:
- self.reimplements = []
- else:
- self.reimplements = reimplements
- if reimplementedby is None:
- self.reimplementedby = []
- else:
- self.reimplementedby = reimplementedby
- if param is None:
- self.param = []
- else:
- self.param = param
- if enumvalue is None:
- self.enumvalue = []
- else:
- self.enumvalue = enumvalue
- self.initializer = initializer
- self.exceptions = exceptions
- self.briefdescription = briefdescription
- self.detaileddescription = detaileddescription
- self.inbodydescription = inbodydescription
- self.location = location
- if references is None:
- self.references = []
- else:
- self.references = references
- if referencedby is None:
- self.referencedby = []
- else:
- self.referencedby = referencedby
- def factory(*args_, **kwargs_):
- if memberdefType.subclass:
- return memberdefType.subclass(*args_, **kwargs_)
- else:
- return memberdefType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_templateparamlist(self): return self.templateparamlist
- def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist
- def get_type(self): return self.type_
- def set_type(self, type_): self.type_ = type_
- def get_definition(self): return self.definition
- def set_definition(self, definition): self.definition = definition
- def get_argsstring(self): return self.argsstring
- def set_argsstring(self, argsstring): self.argsstring = argsstring
- def get_name(self): return self.name
- def set_name(self, name): self.name = name
- def get_read(self): return self.read
- def set_read(self, read): self.read = read
- def get_write(self): return self.write
- def set_write(self, write): self.write = write
- def get_bitfield(self): return self.bitfield
- def set_bitfield(self, bitfield): self.bitfield = bitfield
- def get_reimplements(self): return self.reimplements
- def set_reimplements(self, reimplements): self.reimplements = reimplements
- def add_reimplements(self, value): self.reimplements.append(value)
- def insert_reimplements(self, index, value): self.reimplements[index] = value
- def get_reimplementedby(self): return self.reimplementedby
- def set_reimplementedby(self, reimplementedby): self.reimplementedby = reimplementedby
- def add_reimplementedby(self, value): self.reimplementedby.append(value)
- def insert_reimplementedby(self, index, value): self.reimplementedby[index] = value
- def get_param(self): return self.param
- def set_param(self, param): self.param = param
- def add_param(self, value): self.param.append(value)
- def insert_param(self, index, value): self.param[index] = value
- def get_enumvalue(self): return self.enumvalue
- def set_enumvalue(self, enumvalue): self.enumvalue = enumvalue
- def add_enumvalue(self, value): self.enumvalue.append(value)
- def insert_enumvalue(self, index, value): self.enumvalue[index] = value
- def get_initializer(self): return self.initializer
- def set_initializer(self, initializer): self.initializer = initializer
- def get_exceptions(self): return self.exceptions
- def set_exceptions(self, exceptions): self.exceptions = exceptions
- def get_briefdescription(self): return self.briefdescription
- def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription
- def get_detaileddescription(self): return self.detaileddescription
- def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription
- def get_inbodydescription(self): return self.inbodydescription
- def set_inbodydescription(self, inbodydescription): self.inbodydescription = inbodydescription
- def get_location(self): return self.location
- def set_location(self, location): self.location = location
- def get_references(self): return self.references
- def set_references(self, references): self.references = references
- def add_references(self, value): self.references.append(value)
- def insert_references(self, index, value): self.references[index] = value
- def get_referencedby(self): return self.referencedby
- def set_referencedby(self, referencedby): self.referencedby = referencedby
- def add_referencedby(self, value): self.referencedby.append(value)
- def insert_referencedby(self, index, value): self.referencedby[index] = value
- def get_initonly(self): return self.initonly
- def set_initonly(self, initonly): self.initonly = initonly
- def get_kind(self): return self.kind
- def set_kind(self, kind): self.kind = kind
- def get_volatile(self): return self.volatile
- def set_volatile(self, volatile): self.volatile = volatile
- def get_const(self): return self.const
- def set_const(self, const): self.const = const
- def get_raise(self): return self.raisexx
- def set_raise(self, raisexx): self.raisexx = raisexx
- def get_virt(self): return self.virt
- def set_virt(self, virt): self.virt = virt
- def get_readable(self): return self.readable
- def set_readable(self, readable): self.readable = readable
- def get_prot(self): return self.prot
- def set_prot(self, prot): self.prot = prot
- def get_explicit(self): return self.explicit
- def set_explicit(self, explicit): self.explicit = explicit
- def get_new(self): return self.new
- def set_new(self, new): self.new = new
- def get_final(self): return self.final
- def set_final(self, final): self.final = final
- def get_writable(self): return self.writable
- def set_writable(self, writable): self.writable = writable
- def get_add(self): return self.add
- def set_add(self, add): self.add = add
- def get_static(self): return self.static
- def set_static(self, static): self.static = static
- def get_remove(self): return self.remove
- def set_remove(self, remove): self.remove = remove
- def get_sealed(self): return self.sealed
- def set_sealed(self, sealed): self.sealed = sealed
- def get_mutable(self): return self.mutable
- def set_mutable(self, mutable): self.mutable = mutable
- def get_gettable(self): return self.gettable
- def set_gettable(self, gettable): self.gettable = gettable
- def get_inline(self): return self.inline
- def set_inline(self, inline): self.inline = inline
- def get_settable(self): return self.settable
- def set_settable(self, settable): self.settable = settable
- def get_id(self): return self.id
- def set_id(self, id): self.id = id
- def export(self, outfile, level, namespace_='', name_='memberdefType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='memberdefType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='memberdefType'):
- if self.initonly is not None:
- outfile.write(' initonly=%s' % (quote_attrib(self.initonly), ))
- if self.kind is not None:
- outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
- if self.volatile is not None:
- outfile.write(' volatile=%s' % (quote_attrib(self.volatile), ))
- if self.const is not None:
- outfile.write(' const=%s' % (quote_attrib(self.const), ))
- if self.raisexx is not None:
- outfile.write(' raise=%s' % (quote_attrib(self.raisexx), ))
- if self.virt is not None:
- outfile.write(' virt=%s' % (quote_attrib(self.virt), ))
- if self.readable is not None:
- outfile.write(' readable=%s' % (quote_attrib(self.readable), ))
- if self.prot is not None:
- outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
- if self.explicit is not None:
- outfile.write(' explicit=%s' % (quote_attrib(self.explicit), ))
- if self.new is not None:
- outfile.write(' new=%s' % (quote_attrib(self.new), ))
- if self.final is not None:
- outfile.write(' final=%s' % (quote_attrib(self.final), ))
- if self.writable is not None:
- outfile.write(' writable=%s' % (quote_attrib(self.writable), ))
- if self.add is not None:
- outfile.write(' add=%s' % (quote_attrib(self.add), ))
- if self.static is not None:
- outfile.write(' static=%s' % (quote_attrib(self.static), ))
- if self.remove is not None:
- outfile.write(' remove=%s' % (quote_attrib(self.remove), ))
- if self.sealed is not None:
- outfile.write(' sealed=%s' % (quote_attrib(self.sealed), ))
- if self.mutable is not None:
- outfile.write(' mutable=%s' % (quote_attrib(self.mutable), ))
- if self.gettable is not None:
- outfile.write(' gettable=%s' % (quote_attrib(self.gettable), ))
- if self.inline is not None:
- outfile.write(' inline=%s' % (quote_attrib(self.inline), ))
- if self.settable is not None:
- outfile.write(' settable=%s' % (quote_attrib(self.settable), ))
- if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='memberdefType'):
- if self.templateparamlist:
- self.templateparamlist.export(outfile, level, namespace_, name_='templateparamlist')
- if self.type_:
- self.type_.export(outfile, level, namespace_, name_='type')
- if self.definition is not None:
- showIndent(outfile, level)
- outfile.write('<%sdefinition>%s</%sdefinition>\n' % (namespace_, self.format_string(quote_xml(self.definition).encode(ExternalEncoding), input_name='definition'), namespace_))
- if self.argsstring is not None:
- showIndent(outfile, level)
- outfile.write('<%sargsstring>%s</%sargsstring>\n' % (namespace_, self.format_string(quote_xml(self.argsstring).encode(ExternalEncoding), input_name='argsstring'), namespace_))
- if self.name is not None:
- showIndent(outfile, level)
- outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
- if self.read is not None:
- showIndent(outfile, level)
- outfile.write('<%sread>%s</%sread>\n' % (namespace_, self.format_string(quote_xml(self.read).encode(ExternalEncoding), input_name='read'), namespace_))
- if self.write is not None:
- showIndent(outfile, level)
- outfile.write('<%swrite>%s</%swrite>\n' % (namespace_, self.format_string(quote_xml(self.write).encode(ExternalEncoding), input_name='write'), namespace_))
- if self.bitfield is not None:
- showIndent(outfile, level)
- outfile.write('<%sbitfield>%s</%sbitfield>\n' % (namespace_, self.format_string(quote_xml(self.bitfield).encode(ExternalEncoding), input_name='bitfield'), namespace_))
- for reimplements_ in self.reimplements:
- reimplements_.export(outfile, level, namespace_, name_='reimplements')
- for reimplementedby_ in self.reimplementedby:
- reimplementedby_.export(outfile, level, namespace_, name_='reimplementedby')
- for param_ in self.param:
- param_.export(outfile, level, namespace_, name_='param')
- for enumvalue_ in self.enumvalue:
- enumvalue_.export(outfile, level, namespace_, name_='enumvalue')
- if self.initializer:
- self.initializer.export(outfile, level, namespace_, name_='initializer')
- if self.exceptions:
- self.exceptions.export(outfile, level, namespace_, name_='exceptions')
- if self.briefdescription:
- self.briefdescription.export(outfile, level, namespace_, name_='briefdescription')
- if self.detaileddescription:
- self.detaileddescription.export(outfile, level, namespace_, name_='detaileddescription')
- if self.inbodydescription:
- self.inbodydescription.export(outfile, level, namespace_, name_='inbodydescription')
- if self.location:
- self.location.export(outfile, level, namespace_, name_='location', )
- for references_ in self.references:
- references_.export(outfile, level, namespace_, name_='references')
- for referencedby_ in self.referencedby:
- referencedby_.export(outfile, level, namespace_, name_='referencedby')
- def hasContent_(self):
- if (
- self.templateparamlist is not None or
- self.type_ is not None or
- self.definition is not None or
- self.argsstring is not None or
- self.name is not None or
- self.read is not None or
- self.write is not None or
- self.bitfield is not None or
- self.reimplements is not None or
- self.reimplementedby is not None or
- self.param is not None or
- self.enumvalue is not None or
- self.initializer is not None or
- self.exceptions is not None or
- self.briefdescription is not None or
- self.detaileddescription is not None or
- self.inbodydescription is not None or
- self.location is not None or
- self.references is not None or
- self.referencedby is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='memberdefType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.initonly is not None:
- showIndent(outfile, level)
- outfile.write('initonly = "%s",\n' % (self.initonly,))
- if self.kind is not None:
- showIndent(outfile, level)
- outfile.write('kind = "%s",\n' % (self.kind,))
- if self.volatile is not None:
- showIndent(outfile, level)
- outfile.write('volatile = "%s",\n' % (self.volatile,))
- if self.const is not None:
- showIndent(outfile, level)
- outfile.write('const = "%s",\n' % (self.const,))
- if self.raisexx is not None:
- showIndent(outfile, level)
- outfile.write('raisexx = "%s",\n' % (self.raisexx,))
- if self.virt is not None:
- showIndent(outfile, level)
- outfile.write('virt = "%s",\n' % (self.virt,))
- if self.readable is not None:
- showIndent(outfile, level)
- outfile.write('readable = "%s",\n' % (self.readable,))
- if self.prot is not None:
- showIndent(outfile, level)
- outfile.write('prot = "%s",\n' % (self.prot,))
- if self.explicit is not None:
- showIndent(outfile, level)
- outfile.write('explicit = "%s",\n' % (self.explicit,))
- if self.new is not None:
- showIndent(outfile, level)
- outfile.write('new = "%s",\n' % (self.new,))
- if self.final is not None:
- showIndent(outfile, level)
- outfile.write('final = "%s",\n' % (self.final,))
- if self.writable is not None:
- showIndent(outfile, level)
- outfile.write('writable = "%s",\n' % (self.writable,))
- if self.add is not None:
- showIndent(outfile, level)
- outfile.write('add = "%s",\n' % (self.add,))
- if self.static is not None:
- showIndent(outfile, level)
- outfile.write('static = "%s",\n' % (self.static,))
- if self.remove is not None:
- showIndent(outfile, level)
- outfile.write('remove = "%s",\n' % (self.remove,))
- if self.sealed is not None:
- showIndent(outfile, level)
- outfile.write('sealed = "%s",\n' % (self.sealed,))
- if self.mutable is not None:
- showIndent(outfile, level)
- outfile.write('mutable = "%s",\n' % (self.mutable,))
- if self.gettable is not None:
- showIndent(outfile, level)
- outfile.write('gettable = "%s",\n' % (self.gettable,))
- if self.inline is not None:
- showIndent(outfile, level)
- outfile.write('inline = "%s",\n' % (self.inline,))
- if self.settable is not None:
- showIndent(outfile, level)
- outfile.write('settable = "%s",\n' % (self.settable,))
- if self.id is not None:
- showIndent(outfile, level)
- outfile.write('id = %s,\n' % (self.id,))
- def exportLiteralChildren(self, outfile, level, name_):
- if self.templateparamlist:
- showIndent(outfile, level)
- outfile.write('templateparamlist=model_.templateparamlistType(\n')
- self.templateparamlist.exportLiteral(outfile, level, name_='templateparamlist')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.type_:
- showIndent(outfile, level)
- outfile.write('type_=model_.linkedTextType(\n')
- self.type_.exportLiteral(outfile, level, name_='type')
- showIndent(outfile, level)
- outfile.write('),\n')
- showIndent(outfile, level)
- outfile.write('definition=%s,\n' % quote_python(self.definition).encode(ExternalEncoding))
- showIndent(outfile, level)
- outfile.write('argsstring=%s,\n' % quote_python(self.argsstring).encode(ExternalEncoding))
- showIndent(outfile, level)
- outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
- showIndent(outfile, level)
- outfile.write('read=%s,\n' % quote_python(self.read).encode(ExternalEncoding))
- showIndent(outfile, level)
- outfile.write('write=%s,\n' % quote_python(self.write).encode(ExternalEncoding))
- showIndent(outfile, level)
- outfile.write('bitfield=%s,\n' % quote_python(self.bitfield).encode(ExternalEncoding))
- showIndent(outfile, level)
- outfile.write('reimplements=[\n')
- level += 1
- for reimplements in self.reimplements:
- showIndent(outfile, level)
- outfile.write('model_.reimplements(\n')
- reimplements.exportLiteral(outfile, level, name_='reimplements')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('reimplementedby=[\n')
- level += 1
- for reimplementedby in self.reimplementedby:
- showIndent(outfile, level)
- outfile.write('model_.reimplementedby(\n')
- reimplementedby.exportLiteral(outfile, level, name_='reimplementedby')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('param=[\n')
- level += 1
- for param in self.param:
- showIndent(outfile, level)
- outfile.write('model_.param(\n')
- param.exportLiteral(outfile, level, name_='param')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('enumvalue=[\n')
- level += 1
- for enumvalue in self.enumvalue:
- showIndent(outfile, level)
- outfile.write('model_.enumvalue(\n')
- enumvalue.exportLiteral(outfile, level, name_='enumvalue')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- if self.initializer:
- showIndent(outfile, level)
- outfile.write('initializer=model_.linkedTextType(\n')
- self.initializer.exportLiteral(outfile, level, name_='initializer')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.exceptions:
- showIndent(outfile, level)
- outfile.write('exceptions=model_.linkedTextType(\n')
- self.exceptions.exportLiteral(outfile, level, name_='exceptions')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.briefdescription:
- showIndent(outfile, level)
- outfile.write('briefdescription=model_.descriptionType(\n')
- self.briefdescription.exportLiteral(outfile, level, name_='briefdescription')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.detaileddescription:
- showIndent(outfile, level)
- outfile.write('detaileddescription=model_.descriptionType(\n')
- self.detaileddescription.exportLiteral(outfile, level, name_='detaileddescription')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.inbodydescription:
- showIndent(outfile, level)
- outfile.write('inbodydescription=model_.descriptionType(\n')
- self.inbodydescription.exportLiteral(outfile, level, name_='inbodydescription')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.location:
- showIndent(outfile, level)
- outfile.write('location=model_.locationType(\n')
- self.location.exportLiteral(outfile, level, name_='location')
- showIndent(outfile, level)
- outfile.write('),\n')
- showIndent(outfile, level)
- outfile.write('references=[\n')
- level += 1
- for references in self.references:
- showIndent(outfile, level)
- outfile.write('model_.references(\n')
- references.exportLiteral(outfile, level, name_='references')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('referencedby=[\n')
- level += 1
- for referencedby in self.referencedby:
- showIndent(outfile, level)
- outfile.write('model_.referencedby(\n')
- referencedby.exportLiteral(outfile, level, name_='referencedby')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('initonly'):
- self.initonly = attrs.get('initonly').value
- if attrs.get('kind'):
- self.kind = attrs.get('kind').value
- if attrs.get('volatile'):
- self.volatile = attrs.get('volatile').value
- if attrs.get('const'):
- self.const = attrs.get('const').value
- if attrs.get('raise'):
- self.raisexx = attrs.get('raise').value
- if attrs.get('virt'):
- self.virt = attrs.get('virt').value
- if attrs.get('readable'):
- self.readable = attrs.get('readable').value
- if attrs.get('prot'):
- self.prot = attrs.get('prot').value
- if attrs.get('explicit'):
- self.explicit = attrs.get('explicit').value
- if attrs.get('new'):
- self.new = attrs.get('new').value
- if attrs.get('final'):
- self.final = attrs.get('final').value
- if attrs.get('writable'):
- self.writable = attrs.get('writable').value
- if attrs.get('add'):
- self.add = attrs.get('add').value
- if attrs.get('static'):
- self.static = attrs.get('static').value
- if attrs.get('remove'):
- self.remove = attrs.get('remove').value
- if attrs.get('sealed'):
- self.sealed = attrs.get('sealed').value
- if attrs.get('mutable'):
- self.mutable = attrs.get('mutable').value
- if attrs.get('gettable'):
- self.gettable = attrs.get('gettable').value
- if attrs.get('inline'):
- self.inline = attrs.get('inline').value
- if attrs.get('settable'):
- self.settable = attrs.get('settable').value
- if attrs.get('id'):
- self.id = attrs.get('id').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'templateparamlist':
- obj_ = templateparamlistType.factory()
- obj_.build(child_)
- self.set_templateparamlist(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'type':
- obj_ = linkedTextType.factory()
- obj_.build(child_)
- self.set_type(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'definition':
- definition_ = ''
- for text__content_ in child_.childNodes:
- definition_ += text__content_.nodeValue
- self.definition = definition_
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'argsstring':
- argsstring_ = ''
- for text__content_ in child_.childNodes:
- argsstring_ += text__content_.nodeValue
- self.argsstring = argsstring_
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'name':
- name_ = ''
- for text__content_ in child_.childNodes:
- name_ += text__content_.nodeValue
- self.name = name_
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'read':
- read_ = ''
- for text__content_ in child_.childNodes:
- read_ += text__content_.nodeValue
- self.read = read_
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'write':
- write_ = ''
- for text__content_ in child_.childNodes:
- write_ += text__content_.nodeValue
- self.write = write_
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'bitfield':
- bitfield_ = ''
- for text__content_ in child_.childNodes:
- bitfield_ += text__content_.nodeValue
- self.bitfield = bitfield_
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'reimplements':
- obj_ = reimplementType.factory()
- obj_.build(child_)
- self.reimplements.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'reimplementedby':
- obj_ = reimplementType.factory()
- obj_.build(child_)
- self.reimplementedby.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'param':
- obj_ = paramType.factory()
- obj_.build(child_)
- self.param.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'enumvalue':
- obj_ = enumvalueType.factory()
- obj_.build(child_)
- self.enumvalue.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'initializer':
- obj_ = linkedTextType.factory()
- obj_.build(child_)
- self.set_initializer(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'exceptions':
- obj_ = linkedTextType.factory()
- obj_.build(child_)
- self.set_exceptions(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'briefdescription':
- obj_ = descriptionType.factory()
- obj_.build(child_)
- self.set_briefdescription(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'detaileddescription':
- obj_ = descriptionType.factory()
- obj_.build(child_)
- self.set_detaileddescription(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'inbodydescription':
- obj_ = descriptionType.factory()
- obj_.build(child_)
- self.set_inbodydescription(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'location':
- obj_ = locationType.factory()
- obj_.build(child_)
- self.set_location(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'references':
- obj_ = referenceType.factory()
- obj_.build(child_)
- self.references.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'referencedby':
- obj_ = referenceType.factory()
- obj_.build(child_)
- self.referencedby.append(obj_)
-# end class memberdefType
-
-
-class definition(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, valueOf_=''):
- self.valueOf_ = valueOf_
- def factory(*args_, **kwargs_):
- if definition.subclass:
- return definition.subclass(*args_, **kwargs_)
- else:
- return definition(*args_, **kwargs_)
- factory = staticmethod(factory)
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='definition', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='definition')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='definition'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='definition'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='definition'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class definition
-
-
-class argsstring(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, valueOf_=''):
- self.valueOf_ = valueOf_
- def factory(*args_, **kwargs_):
- if argsstring.subclass:
- return argsstring.subclass(*args_, **kwargs_)
- else:
- return argsstring(*args_, **kwargs_)
- factory = staticmethod(factory)
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='argsstring', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='argsstring')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='argsstring'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='argsstring'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='argsstring'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class argsstring
-
-
-class read(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, valueOf_=''):
- self.valueOf_ = valueOf_
- def factory(*args_, **kwargs_):
- if read.subclass:
- return read.subclass(*args_, **kwargs_)
- else:
- return read(*args_, **kwargs_)
- factory = staticmethod(factory)
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='read', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='read')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='read'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='read'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='read'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class read
-
-
-class write(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, valueOf_=''):
- self.valueOf_ = valueOf_
- def factory(*args_, **kwargs_):
- if write.subclass:
- return write.subclass(*args_, **kwargs_)
- else:
- return write(*args_, **kwargs_)
- factory = staticmethod(factory)
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='write', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='write')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='write'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='write'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='write'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class write
-
-
-class bitfield(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, valueOf_=''):
- self.valueOf_ = valueOf_
- def factory(*args_, **kwargs_):
- if bitfield.subclass:
- return bitfield.subclass(*args_, **kwargs_)
- else:
- return bitfield(*args_, **kwargs_)
- factory = staticmethod(factory)
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='bitfield', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='bitfield')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='bitfield'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='bitfield'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='bitfield'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class bitfield
-
-
-class descriptionType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, title=None, para=None, sect1=None, internal=None, mixedclass_=None, content_=None):
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if descriptionType.subclass:
- return descriptionType.subclass(*args_, **kwargs_)
- else:
- return descriptionType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_title(self): return self.title
- def set_title(self, title): self.title = title
- def get_para(self): return self.para
- def set_para(self, para): self.para = para
- def add_para(self, value): self.para.append(value)
- def insert_para(self, index, value): self.para[index] = value
- def get_sect1(self): return self.sect1
- def set_sect1(self, sect1): self.sect1 = sect1
- def add_sect1(self, value): self.sect1.append(value)
- def insert_sect1(self, index, value): self.sect1[index] = value
- def get_internal(self): return self.internal
- def set_internal(self, internal): self.internal = internal
- def export(self, outfile, level, namespace_='', name_='descriptionType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='descriptionType')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='descriptionType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='descriptionType'):
- for item_ in self.content_:
- item_.export(outfile, level, item_.name, namespace_)
- def hasContent_(self):
- if (
- self.title is not None or
- self.para is not None or
- self.sect1 is not None or
- self.internal is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='descriptionType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'title':
- childobj_ = docTitleType.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'title', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
- childobj_ = docParaType.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect1':
- childobj_ = docSect1Type.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'sect1', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'internal':
- childobj_ = docInternalType.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'internal', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
-# end class descriptionType
-
-
-class enumvalueType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, prot=None, id=None, name=None, initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None):
- self.prot = prot
- self.id = id
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if enumvalueType.subclass:
- return enumvalueType.subclass(*args_, **kwargs_)
- else:
- return enumvalueType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_name(self): return self.name
- def set_name(self, name): self.name = name
- def get_initializer(self): return self.initializer
- def set_initializer(self, initializer): self.initializer = initializer
- def get_briefdescription(self): return self.briefdescription
- def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription
- def get_detaileddescription(self): return self.detaileddescription
- def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription
- def get_prot(self): return self.prot
- def set_prot(self, prot): self.prot = prot
- def get_id(self): return self.id
- def set_id(self, id): self.id = id
- def export(self, outfile, level, namespace_='', name_='enumvalueType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='enumvalueType')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='enumvalueType'):
- if self.prot is not None:
- outfile.write(' prot=%s' % (quote_attrib(self.prot), ))
- if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='enumvalueType'):
- for item_ in self.content_:
- item_.export(outfile, level, item_.name, namespace_)
- def hasContent_(self):
- if (
- self.name is not None or
- self.initializer is not None or
- self.briefdescription is not None or
- self.detaileddescription is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='enumvalueType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.prot is not None:
- showIndent(outfile, level)
- outfile.write('prot = "%s",\n' % (self.prot,))
- if self.id is not None:
- showIndent(outfile, level)
- outfile.write('id = %s,\n' % (self.id,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('prot'):
- self.prot = attrs.get('prot').value
- if attrs.get('id'):
- self.id = attrs.get('id').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'name':
- value_ = []
- for text_ in child_.childNodes:
- value_.append(text_.nodeValue)
- valuestr_ = ''.join(value_)
- obj_ = self.mixedclass_(MixedContainer.CategorySimple,
- MixedContainer.TypeString, 'name', valuestr_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'initializer':
- childobj_ = linkedTextType.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'initializer', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'briefdescription':
- childobj_ = descriptionType.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'briefdescription', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'detaileddescription':
- childobj_ = descriptionType.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'detaileddescription', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
-# end class enumvalueType
-
-
-class templateparamlistType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, param=None):
- if param is None:
- self.param = []
- else:
- self.param = param
- def factory(*args_, **kwargs_):
- if templateparamlistType.subclass:
- return templateparamlistType.subclass(*args_, **kwargs_)
- else:
- return templateparamlistType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_param(self): return self.param
- def set_param(self, param): self.param = param
- def add_param(self, value): self.param.append(value)
- def insert_param(self, index, value): self.param[index] = value
- def export(self, outfile, level, namespace_='', name_='templateparamlistType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='templateparamlistType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='templateparamlistType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='templateparamlistType'):
- for param_ in self.param:
- param_.export(outfile, level, namespace_, name_='param')
- def hasContent_(self):
- if (
- self.param is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='templateparamlistType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('param=[\n')
- level += 1
- for param in self.param:
- showIndent(outfile, level)
- outfile.write('model_.param(\n')
- param.exportLiteral(outfile, level, name_='param')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'param':
- obj_ = paramType.factory()
- obj_.build(child_)
- self.param.append(obj_)
-# end class templateparamlistType
-
-
-class paramType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, type_=None, declname=None, defname=None, array=None, defval=None, briefdescription=None):
- self.type_ = type_
- self.declname = declname
- self.defname = defname
- self.array = array
- self.defval = defval
- self.briefdescription = briefdescription
- def factory(*args_, **kwargs_):
- if paramType.subclass:
- return paramType.subclass(*args_, **kwargs_)
- else:
- return paramType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_type(self): return self.type_
- def set_type(self, type_): self.type_ = type_
- def get_declname(self): return self.declname
- def set_declname(self, declname): self.declname = declname
- def get_defname(self): return self.defname
- def set_defname(self, defname): self.defname = defname
- def get_array(self): return self.array
- def set_array(self, array): self.array = array
- def get_defval(self): return self.defval
- def set_defval(self, defval): self.defval = defval
- def get_briefdescription(self): return self.briefdescription
- def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription
- def export(self, outfile, level, namespace_='', name_='paramType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='paramType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='paramType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='paramType'):
- if self.type_:
- self.type_.export(outfile, level, namespace_, name_='type')
- if self.declname is not None:
- showIndent(outfile, level)
- outfile.write('<%sdeclname>%s</%sdeclname>\n' % (namespace_, self.format_string(quote_xml(self.declname).encode(ExternalEncoding), input_name='declname'), namespace_))
- if self.defname is not None:
- showIndent(outfile, level)
- outfile.write('<%sdefname>%s</%sdefname>\n' % (namespace_, self.format_string(quote_xml(self.defname).encode(ExternalEncoding), input_name='defname'), namespace_))
- if self.array is not None:
- showIndent(outfile, level)
- outfile.write('<%sarray>%s</%sarray>\n' % (namespace_, self.format_string(quote_xml(self.array).encode(ExternalEncoding), input_name='array'), namespace_))
- if self.defval:
- self.defval.export(outfile, level, namespace_, name_='defval')
- if self.briefdescription:
- self.briefdescription.export(outfile, level, namespace_, name_='briefdescription')
- def hasContent_(self):
- if (
- self.type_ is not None or
- self.declname is not None or
- self.defname is not None or
- self.array is not None or
- self.defval is not None or
- self.briefdescription is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='paramType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.type_:
- showIndent(outfile, level)
- outfile.write('type_=model_.linkedTextType(\n')
- self.type_.exportLiteral(outfile, level, name_='type')
- showIndent(outfile, level)
- outfile.write('),\n')
- showIndent(outfile, level)
- outfile.write('declname=%s,\n' % quote_python(self.declname).encode(ExternalEncoding))
- showIndent(outfile, level)
- outfile.write('defname=%s,\n' % quote_python(self.defname).encode(ExternalEncoding))
- showIndent(outfile, level)
- outfile.write('array=%s,\n' % quote_python(self.array).encode(ExternalEncoding))
- if self.defval:
- showIndent(outfile, level)
- outfile.write('defval=model_.linkedTextType(\n')
- self.defval.exportLiteral(outfile, level, name_='defval')
- showIndent(outfile, level)
- outfile.write('),\n')
- if self.briefdescription:
- showIndent(outfile, level)
- outfile.write('briefdescription=model_.descriptionType(\n')
- self.briefdescription.exportLiteral(outfile, level, name_='briefdescription')
- showIndent(outfile, level)
- outfile.write('),\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'type':
- obj_ = linkedTextType.factory()
- obj_.build(child_)
- self.set_type(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'declname':
- declname_ = ''
- for text__content_ in child_.childNodes:
- declname_ += text__content_.nodeValue
- self.declname = declname_
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'defname':
- defname_ = ''
- for text__content_ in child_.childNodes:
- defname_ += text__content_.nodeValue
- self.defname = defname_
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'array':
- array_ = ''
- for text__content_ in child_.childNodes:
- array_ += text__content_.nodeValue
- self.array = array_
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'defval':
- obj_ = linkedTextType.factory()
- obj_.build(child_)
- self.set_defval(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'briefdescription':
- obj_ = descriptionType.factory()
- obj_.build(child_)
- self.set_briefdescription(obj_)
-# end class paramType
-
-
-class declname(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, valueOf_=''):
- self.valueOf_ = valueOf_
- def factory(*args_, **kwargs_):
- if declname.subclass:
- return declname.subclass(*args_, **kwargs_)
- else:
- return declname(*args_, **kwargs_)
- factory = staticmethod(factory)
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='declname', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='declname')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='declname'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='declname'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='declname'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class declname
-
-
-class defname(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, valueOf_=''):
- self.valueOf_ = valueOf_
- def factory(*args_, **kwargs_):
- if defname.subclass:
- return defname.subclass(*args_, **kwargs_)
- else:
- return defname(*args_, **kwargs_)
- factory = staticmethod(factory)
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='defname', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='defname')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='defname'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='defname'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='defname'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class defname
-
-
-class array(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, valueOf_=''):
- self.valueOf_ = valueOf_
- def factory(*args_, **kwargs_):
- if array.subclass:
- return array.subclass(*args_, **kwargs_)
- else:
- return array(*args_, **kwargs_)
- factory = staticmethod(factory)
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='array', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='array')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='array'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='array'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='array'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class array
-
-
-class linkedTextType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, ref=None, mixedclass_=None, content_=None):
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if linkedTextType.subclass:
- return linkedTextType.subclass(*args_, **kwargs_)
- else:
- return linkedTextType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_ref(self): return self.ref
- def set_ref(self, ref): self.ref = ref
- def add_ref(self, value): self.ref.append(value)
- def insert_ref(self, index, value): self.ref[index] = value
- def export(self, outfile, level, namespace_='', name_='linkedTextType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='linkedTextType')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='linkedTextType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='linkedTextType'):
- for item_ in self.content_:
- item_.export(outfile, level, item_.name, namespace_)
- def hasContent_(self):
- if (
- self.ref is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='linkedTextType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'ref':
- childobj_ = docRefTextType.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'ref', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
-# end class linkedTextType
-
-
-class graphType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, node=None):
- if node is None:
- self.node = []
- else:
- self.node = node
- def factory(*args_, **kwargs_):
- if graphType.subclass:
- return graphType.subclass(*args_, **kwargs_)
- else:
- return graphType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_node(self): return self.node
- def set_node(self, node): self.node = node
- def add_node(self, value): self.node.append(value)
- def insert_node(self, index, value): self.node[index] = value
- def export(self, outfile, level, namespace_='', name_='graphType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='graphType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='graphType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='graphType'):
- for node_ in self.node:
- node_.export(outfile, level, namespace_, name_='node')
- def hasContent_(self):
- if (
- self.node is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='graphType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('node=[\n')
- level += 1
- for node in self.node:
- showIndent(outfile, level)
- outfile.write('model_.node(\n')
- node.exportLiteral(outfile, level, name_='node')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'node':
- obj_ = nodeType.factory()
- obj_.build(child_)
- self.node.append(obj_)
-# end class graphType
-
-
-class nodeType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, id=None, label=None, link=None, childnode=None):
- self.id = id
- self.label = label
- self.link = link
- if childnode is None:
- self.childnode = []
- else:
- self.childnode = childnode
- def factory(*args_, **kwargs_):
- if nodeType.subclass:
- return nodeType.subclass(*args_, **kwargs_)
- else:
- return nodeType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_label(self): return self.label
- def set_label(self, label): self.label = label
- def get_link(self): return self.link
- def set_link(self, link): self.link = link
- def get_childnode(self): return self.childnode
- def set_childnode(self, childnode): self.childnode = childnode
- def add_childnode(self, value): self.childnode.append(value)
- def insert_childnode(self, index, value): self.childnode[index] = value
- def get_id(self): return self.id
- def set_id(self, id): self.id = id
- def export(self, outfile, level, namespace_='', name_='nodeType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='nodeType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='nodeType'):
- if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='nodeType'):
- if self.label is not None:
- showIndent(outfile, level)
- outfile.write('<%slabel>%s</%slabel>\n' % (namespace_, self.format_string(quote_xml(self.label).encode(ExternalEncoding), input_name='label'), namespace_))
- if self.link:
- self.link.export(outfile, level, namespace_, name_='link')
- for childnode_ in self.childnode:
- childnode_.export(outfile, level, namespace_, name_='childnode')
- def hasContent_(self):
- if (
- self.label is not None or
- self.link is not None or
- self.childnode is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='nodeType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.id is not None:
- showIndent(outfile, level)
- outfile.write('id = %s,\n' % (self.id,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('label=%s,\n' % quote_python(self.label).encode(ExternalEncoding))
- if self.link:
- showIndent(outfile, level)
- outfile.write('link=model_.linkType(\n')
- self.link.exportLiteral(outfile, level, name_='link')
- showIndent(outfile, level)
- outfile.write('),\n')
- showIndent(outfile, level)
- outfile.write('childnode=[\n')
- level += 1
- for childnode in self.childnode:
- showIndent(outfile, level)
- outfile.write('model_.childnode(\n')
- childnode.exportLiteral(outfile, level, name_='childnode')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('id'):
- self.id = attrs.get('id').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'label':
- label_ = ''
- for text__content_ in child_.childNodes:
- label_ += text__content_.nodeValue
- self.label = label_
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'link':
- obj_ = linkType.factory()
- obj_.build(child_)
- self.set_link(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'childnode':
- obj_ = childnodeType.factory()
- obj_.build(child_)
- self.childnode.append(obj_)
-# end class nodeType
-
-
-class label(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, valueOf_=''):
- self.valueOf_ = valueOf_
- def factory(*args_, **kwargs_):
- if label.subclass:
- return label.subclass(*args_, **kwargs_)
- else:
- return label(*args_, **kwargs_)
- factory = staticmethod(factory)
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='label', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='label')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='label'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='label'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='label'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class label
-
-
-class childnodeType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, relation=None, refid=None, edgelabel=None):
- self.relation = relation
- self.refid = refid
- if edgelabel is None:
- self.edgelabel = []
- else:
- self.edgelabel = edgelabel
- def factory(*args_, **kwargs_):
- if childnodeType.subclass:
- return childnodeType.subclass(*args_, **kwargs_)
- else:
- return childnodeType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_edgelabel(self): return self.edgelabel
- def set_edgelabel(self, edgelabel): self.edgelabel = edgelabel
- def add_edgelabel(self, value): self.edgelabel.append(value)
- def insert_edgelabel(self, index, value): self.edgelabel[index] = value
- def get_relation(self): return self.relation
- def set_relation(self, relation): self.relation = relation
- def get_refid(self): return self.refid
- def set_refid(self, refid): self.refid = refid
- def export(self, outfile, level, namespace_='', name_='childnodeType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='childnodeType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='childnodeType'):
- if self.relation is not None:
- outfile.write(' relation=%s' % (quote_attrib(self.relation), ))
- if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='childnodeType'):
- for edgelabel_ in self.edgelabel:
- showIndent(outfile, level)
- outfile.write('<%sedgelabel>%s</%sedgelabel>\n' % (namespace_, self.format_string(quote_xml(edgelabel_).encode(ExternalEncoding), input_name='edgelabel'), namespace_))
- def hasContent_(self):
- if (
- self.edgelabel is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='childnodeType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.relation is not None:
- showIndent(outfile, level)
- outfile.write('relation = "%s",\n' % (self.relation,))
- if self.refid is not None:
- showIndent(outfile, level)
- outfile.write('refid = %s,\n' % (self.refid,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('edgelabel=[\n')
- level += 1
- for edgelabel in self.edgelabel:
- showIndent(outfile, level)
- outfile.write('%s,\n' % quote_python(edgelabel).encode(ExternalEncoding))
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('relation'):
- self.relation = attrs.get('relation').value
- if attrs.get('refid'):
- self.refid = attrs.get('refid').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'edgelabel':
- edgelabel_ = ''
- for text__content_ in child_.childNodes:
- edgelabel_ += text__content_.nodeValue
- self.edgelabel.append(edgelabel_)
-# end class childnodeType
-
-
-class edgelabel(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, valueOf_=''):
- self.valueOf_ = valueOf_
- def factory(*args_, **kwargs_):
- if edgelabel.subclass:
- return edgelabel.subclass(*args_, **kwargs_)
- else:
- return edgelabel(*args_, **kwargs_)
- factory = staticmethod(factory)
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='edgelabel', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='edgelabel')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='edgelabel'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='edgelabel'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='edgelabel'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class edgelabel
-
-
-class linkType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, refid=None, external=None, valueOf_=''):
- self.refid = refid
- self.external = external
- self.valueOf_ = valueOf_
- def factory(*args_, **kwargs_):
- if linkType.subclass:
- return linkType.subclass(*args_, **kwargs_)
- else:
- return linkType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_refid(self): return self.refid
- def set_refid(self, refid): self.refid = refid
- def get_external(self): return self.external
- def set_external(self, external): self.external = external
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='linkType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='linkType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='linkType'):
- if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
- if self.external is not None:
- outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='linkType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='linkType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.refid is not None:
- showIndent(outfile, level)
- outfile.write('refid = %s,\n' % (self.refid,))
- if self.external is not None:
- showIndent(outfile, level)
- outfile.write('external = %s,\n' % (self.external,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('refid'):
- self.refid = attrs.get('refid').value
- if attrs.get('external'):
- self.external = attrs.get('external').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class linkType
-
-
-class listingType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, codeline=None):
- if codeline is None:
- self.codeline = []
- else:
- self.codeline = codeline
- def factory(*args_, **kwargs_):
- if listingType.subclass:
- return listingType.subclass(*args_, **kwargs_)
- else:
- return listingType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_codeline(self): return self.codeline
- def set_codeline(self, codeline): self.codeline = codeline
- def add_codeline(self, value): self.codeline.append(value)
- def insert_codeline(self, index, value): self.codeline[index] = value
- def export(self, outfile, level, namespace_='', name_='listingType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='listingType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='listingType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='listingType'):
- for codeline_ in self.codeline:
- codeline_.export(outfile, level, namespace_, name_='codeline')
- def hasContent_(self):
- if (
- self.codeline is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='listingType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('codeline=[\n')
- level += 1
- for codeline in self.codeline:
- showIndent(outfile, level)
- outfile.write('model_.codeline(\n')
- codeline.exportLiteral(outfile, level, name_='codeline')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'codeline':
- obj_ = codelineType.factory()
- obj_.build(child_)
- self.codeline.append(obj_)
-# end class listingType
-
-
-class codelineType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None):
- self.external = external
- self.lineno = lineno
- self.refkind = refkind
- self.refid = refid
- if highlight is None:
- self.highlight = []
- else:
- self.highlight = highlight
- def factory(*args_, **kwargs_):
- if codelineType.subclass:
- return codelineType.subclass(*args_, **kwargs_)
- else:
- return codelineType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_highlight(self): return self.highlight
- def set_highlight(self, highlight): self.highlight = highlight
- def add_highlight(self, value): self.highlight.append(value)
- def insert_highlight(self, index, value): self.highlight[index] = value
- def get_external(self): return self.external
- def set_external(self, external): self.external = external
- def get_lineno(self): return self.lineno
- def set_lineno(self, lineno): self.lineno = lineno
- def get_refkind(self): return self.refkind
- def set_refkind(self, refkind): self.refkind = refkind
- def get_refid(self): return self.refid
- def set_refid(self, refid): self.refid = refid
- def export(self, outfile, level, namespace_='', name_='codelineType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='codelineType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='codelineType'):
- if self.external is not None:
- outfile.write(' external=%s' % (quote_attrib(self.external), ))
- if self.lineno is not None:
- outfile.write(' lineno="%s"' % self.format_integer(self.lineno, input_name='lineno'))
- if self.refkind is not None:
- outfile.write(' refkind=%s' % (quote_attrib(self.refkind), ))
- if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='codelineType'):
- for highlight_ in self.highlight:
- highlight_.export(outfile, level, namespace_, name_='highlight')
- def hasContent_(self):
- if (
- self.highlight is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='codelineType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.external is not None:
- showIndent(outfile, level)
- outfile.write('external = "%s",\n' % (self.external,))
- if self.lineno is not None:
- showIndent(outfile, level)
- outfile.write('lineno = %s,\n' % (self.lineno,))
- if self.refkind is not None:
- showIndent(outfile, level)
- outfile.write('refkind = "%s",\n' % (self.refkind,))
- if self.refid is not None:
- showIndent(outfile, level)
- outfile.write('refid = %s,\n' % (self.refid,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('highlight=[\n')
- level += 1
- for highlight in self.highlight:
- showIndent(outfile, level)
- outfile.write('model_.highlight(\n')
- highlight.exportLiteral(outfile, level, name_='highlight')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('external'):
- self.external = attrs.get('external').value
- if attrs.get('lineno'):
- try:
- self.lineno = int(attrs.get('lineno').value)
- except ValueError as exp:
- raise ValueError('Bad integer attribute (lineno): %s' % exp)
- if attrs.get('refkind'):
- self.refkind = attrs.get('refkind').value
- if attrs.get('refid'):
- self.refid = attrs.get('refid').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'highlight':
- obj_ = highlightType.factory()
- obj_.build(child_)
- self.highlight.append(obj_)
-# end class codelineType
-
-
-class highlightType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, classxx=None, sp=None, ref=None, mixedclass_=None, content_=None):
- self.classxx = classxx
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if highlightType.subclass:
- return highlightType.subclass(*args_, **kwargs_)
- else:
- return highlightType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_sp(self): return self.sp
- def set_sp(self, sp): self.sp = sp
- def add_sp(self, value): self.sp.append(value)
- def insert_sp(self, index, value): self.sp[index] = value
- def get_ref(self): return self.ref
- def set_ref(self, ref): self.ref = ref
- def add_ref(self, value): self.ref.append(value)
- def insert_ref(self, index, value): self.ref[index] = value
- def get_class(self): return self.classxx
- def set_class(self, classxx): self.classxx = classxx
- def export(self, outfile, level, namespace_='', name_='highlightType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='highlightType')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='highlightType'):
- if self.classxx is not None:
- outfile.write(' class=%s' % (quote_attrib(self.classxx), ))
- def exportChildren(self, outfile, level, namespace_='', name_='highlightType'):
- for item_ in self.content_:
- item_.export(outfile, level, item_.name, namespace_)
- def hasContent_(self):
- if (
- self.sp is not None or
- self.ref is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='highlightType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.classxx is not None:
- showIndent(outfile, level)
- outfile.write('classxx = "%s",\n' % (self.classxx,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('class'):
- self.classxx = attrs.get('class').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sp':
- value_ = []
- for text_ in child_.childNodes:
- value_.append(text_.nodeValue)
- valuestr_ = ''.join(value_)
- obj_ = self.mixedclass_(MixedContainer.CategorySimple,
- MixedContainer.TypeString, 'sp', valuestr_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'ref':
- childobj_ = docRefTextType.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'ref', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
-# end class highlightType
-
-
-class sp(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, valueOf_=''):
- self.valueOf_ = valueOf_
- def factory(*args_, **kwargs_):
- if sp.subclass:
- return sp.subclass(*args_, **kwargs_)
- else:
- return sp(*args_, **kwargs_)
- factory = staticmethod(factory)
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='sp', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='sp')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='sp'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='sp'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='sp'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class sp
-
-
-class referenceType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None):
- self.endline = endline
- self.startline = startline
- self.refid = refid
- self.compoundref = compoundref
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if referenceType.subclass:
- return referenceType.subclass(*args_, **kwargs_)
- else:
- return referenceType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_endline(self): return self.endline
- def set_endline(self, endline): self.endline = endline
- def get_startline(self): return self.startline
- def set_startline(self, startline): self.startline = startline
- def get_refid(self): return self.refid
- def set_refid(self, refid): self.refid = refid
- def get_compoundref(self): return self.compoundref
- def set_compoundref(self, compoundref): self.compoundref = compoundref
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='referenceType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='referenceType')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='referenceType'):
- if self.endline is not None:
- outfile.write(' endline="%s"' % self.format_integer(self.endline, input_name='endline'))
- if self.startline is not None:
- outfile.write(' startline="%s"' % self.format_integer(self.startline, input_name='startline'))
- if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
- if self.compoundref is not None:
- outfile.write(' compoundref=%s' % (self.format_string(quote_attrib(self.compoundref).encode(ExternalEncoding), input_name='compoundref'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='referenceType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='referenceType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.endline is not None:
- showIndent(outfile, level)
- outfile.write('endline = %s,\n' % (self.endline,))
- if self.startline is not None:
- showIndent(outfile, level)
- outfile.write('startline = %s,\n' % (self.startline,))
- if self.refid is not None:
- showIndent(outfile, level)
- outfile.write('refid = %s,\n' % (self.refid,))
- if self.compoundref is not None:
- showIndent(outfile, level)
- outfile.write('compoundref = %s,\n' % (self.compoundref,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('endline'):
- try:
- self.endline = int(attrs.get('endline').value)
- except ValueError as exp:
- raise ValueError('Bad integer attribute (endline): %s' % exp)
- if attrs.get('startline'):
- try:
- self.startline = int(attrs.get('startline').value)
- except ValueError as exp:
- raise ValueError('Bad integer attribute (startline): %s' % exp)
- if attrs.get('refid'):
- self.refid = attrs.get('refid').value
- if attrs.get('compoundref'):
- self.compoundref = attrs.get('compoundref').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class referenceType
-
-
-class locationType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None, valueOf_=''):
- self.bodystart = bodystart
- self.line = line
- self.bodyend = bodyend
- self.bodyfile = bodyfile
- self.file = file
- self.valueOf_ = valueOf_
- def factory(*args_, **kwargs_):
- if locationType.subclass:
- return locationType.subclass(*args_, **kwargs_)
- else:
- return locationType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_bodystart(self): return self.bodystart
- def set_bodystart(self, bodystart): self.bodystart = bodystart
- def get_line(self): return self.line
- def set_line(self, line): self.line = line
- def get_bodyend(self): return self.bodyend
- def set_bodyend(self, bodyend): self.bodyend = bodyend
- def get_bodyfile(self): return self.bodyfile
- def set_bodyfile(self, bodyfile): self.bodyfile = bodyfile
- def get_file(self): return self.file
- def set_file(self, file): self.file = file
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='locationType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='locationType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='locationType'):
- if self.bodystart is not None:
- outfile.write(' bodystart="%s"' % self.format_integer(self.bodystart, input_name='bodystart'))
- if self.line is not None:
- outfile.write(' line="%s"' % self.format_integer(self.line, input_name='line'))
- if self.bodyend is not None:
- outfile.write(' bodyend="%s"' % self.format_integer(self.bodyend, input_name='bodyend'))
- if self.bodyfile is not None:
- outfile.write(' bodyfile=%s' % (self.format_string(quote_attrib(self.bodyfile).encode(ExternalEncoding), input_name='bodyfile'), ))
- if self.file is not None:
- outfile.write(' file=%s' % (self.format_string(quote_attrib(self.file).encode(ExternalEncoding), input_name='file'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='locationType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='locationType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.bodystart is not None:
- showIndent(outfile, level)
- outfile.write('bodystart = %s,\n' % (self.bodystart,))
- if self.line is not None:
- showIndent(outfile, level)
- outfile.write('line = %s,\n' % (self.line,))
- if self.bodyend is not None:
- showIndent(outfile, level)
- outfile.write('bodyend = %s,\n' % (self.bodyend,))
- if self.bodyfile is not None:
- showIndent(outfile, level)
- outfile.write('bodyfile = %s,\n' % (self.bodyfile,))
- if self.file is not None:
- showIndent(outfile, level)
- outfile.write('file = %s,\n' % (self.file,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('bodystart'):
- try:
- self.bodystart = int(attrs.get('bodystart').value)
- except ValueError as exp:
- raise ValueError('Bad integer attribute (bodystart): %s' % exp)
- if attrs.get('line'):
- try:
- self.line = int(attrs.get('line').value)
- except ValueError as exp:
- raise ValueError('Bad integer attribute (line): %s' % exp)
- if attrs.get('bodyend'):
- try:
- self.bodyend = int(attrs.get('bodyend').value)
- except ValueError as exp:
- raise ValueError('Bad integer attribute (bodyend): %s' % exp)
- if attrs.get('bodyfile'):
- self.bodyfile = attrs.get('bodyfile').value
- if attrs.get('file'):
- self.file = attrs.get('file').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class locationType
-
-
-class docSect1Type(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, id=None, title=None, para=None, sect2=None, internal=None, mixedclass_=None, content_=None):
- self.id = id
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if docSect1Type.subclass:
- return docSect1Type.subclass(*args_, **kwargs_)
- else:
- return docSect1Type(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_title(self): return self.title
- def set_title(self, title): self.title = title
- def get_para(self): return self.para
- def set_para(self, para): self.para = para
- def add_para(self, value): self.para.append(value)
- def insert_para(self, index, value): self.para[index] = value
- def get_sect2(self): return self.sect2
- def set_sect2(self, sect2): self.sect2 = sect2
- def add_sect2(self, value): self.sect2.append(value)
- def insert_sect2(self, index, value): self.sect2[index] = value
- def get_internal(self): return self.internal
- def set_internal(self, internal): self.internal = internal
- def get_id(self): return self.id
- def set_id(self, id): self.id = id
- def export(self, outfile, level, namespace_='', name_='docSect1Type', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docSect1Type')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='docSect1Type'):
- if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='docSect1Type'):
- for item_ in self.content_:
- item_.export(outfile, level, item_.name, namespace_)
- def hasContent_(self):
- if (
- self.title is not None or
- self.para is not None or
- self.sect2 is not None or
- self.internal is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docSect1Type'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.id is not None:
- showIndent(outfile, level)
- outfile.write('id = %s,\n' % (self.id,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('id'):
- self.id = attrs.get('id').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'title':
- childobj_ = docTitleType.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'title', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
- childobj_ = docParaType.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect2':
- childobj_ = docSect2Type.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'sect2', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'internal':
- childobj_ = docInternalS1Type.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'internal', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
-# end class docSect1Type
-
-
-class docSect2Type(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, id=None, title=None, para=None, sect3=None, internal=None, mixedclass_=None, content_=None):
- self.id = id
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if docSect2Type.subclass:
- return docSect2Type.subclass(*args_, **kwargs_)
- else:
- return docSect2Type(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_title(self): return self.title
- def set_title(self, title): self.title = title
- def get_para(self): return self.para
- def set_para(self, para): self.para = para
- def add_para(self, value): self.para.append(value)
- def insert_para(self, index, value): self.para[index] = value
- def get_sect3(self): return self.sect3
- def set_sect3(self, sect3): self.sect3 = sect3
- def add_sect3(self, value): self.sect3.append(value)
- def insert_sect3(self, index, value): self.sect3[index] = value
- def get_internal(self): return self.internal
- def set_internal(self, internal): self.internal = internal
- def get_id(self): return self.id
- def set_id(self, id): self.id = id
- def export(self, outfile, level, namespace_='', name_='docSect2Type', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docSect2Type')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='docSect2Type'):
- if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='docSect2Type'):
- for item_ in self.content_:
- item_.export(outfile, level, item_.name, namespace_)
- def hasContent_(self):
- if (
- self.title is not None or
- self.para is not None or
- self.sect3 is not None or
- self.internal is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docSect2Type'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.id is not None:
- showIndent(outfile, level)
- outfile.write('id = %s,\n' % (self.id,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('id'):
- self.id = attrs.get('id').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'title':
- childobj_ = docTitleType.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'title', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
- childobj_ = docParaType.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect3':
- childobj_ = docSect3Type.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'sect3', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'internal':
- childobj_ = docInternalS2Type.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'internal', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
-# end class docSect2Type
-
-
-class docSect3Type(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, id=None, title=None, para=None, sect4=None, internal=None, mixedclass_=None, content_=None):
- self.id = id
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if docSect3Type.subclass:
- return docSect3Type.subclass(*args_, **kwargs_)
- else:
- return docSect3Type(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_title(self): return self.title
- def set_title(self, title): self.title = title
- def get_para(self): return self.para
- def set_para(self, para): self.para = para
- def add_para(self, value): self.para.append(value)
- def insert_para(self, index, value): self.para[index] = value
- def get_sect4(self): return self.sect4
- def set_sect4(self, sect4): self.sect4 = sect4
- def add_sect4(self, value): self.sect4.append(value)
- def insert_sect4(self, index, value): self.sect4[index] = value
- def get_internal(self): return self.internal
- def set_internal(self, internal): self.internal = internal
- def get_id(self): return self.id
- def set_id(self, id): self.id = id
- def export(self, outfile, level, namespace_='', name_='docSect3Type', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docSect3Type')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='docSect3Type'):
- if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='docSect3Type'):
- for item_ in self.content_:
- item_.export(outfile, level, item_.name, namespace_)
- def hasContent_(self):
- if (
- self.title is not None or
- self.para is not None or
- self.sect4 is not None or
- self.internal is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docSect3Type'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.id is not None:
- showIndent(outfile, level)
- outfile.write('id = %s,\n' % (self.id,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('id'):
- self.id = attrs.get('id').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'title':
- childobj_ = docTitleType.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'title', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
- childobj_ = docParaType.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect4':
- childobj_ = docSect4Type.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'sect4', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'internal':
- childobj_ = docInternalS3Type.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'internal', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
-# end class docSect3Type
-
-
-class docSect4Type(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, id=None, title=None, para=None, internal=None, mixedclass_=None, content_=None):
- self.id = id
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if docSect4Type.subclass:
- return docSect4Type.subclass(*args_, **kwargs_)
- else:
- return docSect4Type(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_title(self): return self.title
- def set_title(self, title): self.title = title
- def get_para(self): return self.para
- def set_para(self, para): self.para = para
- def add_para(self, value): self.para.append(value)
- def insert_para(self, index, value): self.para[index] = value
- def get_internal(self): return self.internal
- def set_internal(self, internal): self.internal = internal
- def get_id(self): return self.id
- def set_id(self, id): self.id = id
- def export(self, outfile, level, namespace_='', name_='docSect4Type', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docSect4Type')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='docSect4Type'):
- if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='docSect4Type'):
- for item_ in self.content_:
- item_.export(outfile, level, item_.name, namespace_)
- def hasContent_(self):
- if (
- self.title is not None or
- self.para is not None or
- self.internal is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docSect4Type'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.id is not None:
- showIndent(outfile, level)
- outfile.write('id = %s,\n' % (self.id,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('id'):
- self.id = attrs.get('id').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'title':
- childobj_ = docTitleType.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'title', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
- childobj_ = docParaType.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'internal':
- childobj_ = docInternalS4Type.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'internal', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
-# end class docSect4Type
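The docSect1Type..docSect4Type classes removed above all follow the same generateDS mixed-content pattern: build() walks the DOM children, buildChildren() dispatches on node type and element name, and every piece (child element or interleaved text) is appended to content_ so document order survives a round trip. A minimal standalone sketch of that dispatch, assuming only xml.dom.minidom and using an illustrative parse_mixed helper rather than the generated classes:

from xml.dom import minidom, Node

def parse_mixed(node):
    """Collect child elements and interleaved text in document order (stands in for content_)."""
    pieces = []
    for child in node.childNodes:
        name = child.nodeName.split(':')[-1]
        if child.nodeType == Node.ELEMENT_NODE and name == 'title':
            pieces.append(('title', child.firstChild.nodeValue if child.firstChild else ''))
        elif child.nodeType == Node.ELEMENT_NODE and name == 'para':
            pieces.append(('para', child.firstChild.nodeValue if child.firstChild else ''))
        elif child.nodeType == Node.TEXT_NODE:
            pieces.append(('text', child.nodeValue))
    return pieces

doc = minidom.parseString('<sect4><title>T</title>tail<para>P</para></sect4>')
print(parse_mixed(doc.documentElement))
# -> [('title', 'T'), ('text', 'tail'), ('para', 'P')]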
-
-
-class docInternalType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None):
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if docInternalType.subclass:
- return docInternalType.subclass(*args_, **kwargs_)
- else:
- return docInternalType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_para(self): return self.para
- def set_para(self, para): self.para = para
- def add_para(self, value): self.para.append(value)
- def insert_para(self, index, value): self.para[index] = value
- def get_sect1(self): return self.sect1
- def set_sect1(self, sect1): self.sect1 = sect1
- def add_sect1(self, value): self.sect1.append(value)
- def insert_sect1(self, index, value): self.sect1[index] = value
- def export(self, outfile, level, namespace_='', name_='docInternalType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docInternalType')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='docInternalType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='docInternalType'):
- for item_ in self.content_:
- item_.export(outfile, level, item_.name, namespace_)
- def hasContent_(self):
- if (
- self.para is not None or
- self.sect1 is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docInternalType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
- childobj_ = docParaType.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect1':
- childobj_ = docSect1Type.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'sect1', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
-# end class docInternalType
-
-
-class docInternalS1Type(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None):
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if docInternalS1Type.subclass:
- return docInternalS1Type.subclass(*args_, **kwargs_)
- else:
- return docInternalS1Type(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_para(self): return self.para
- def set_para(self, para): self.para = para
- def add_para(self, value): self.para.append(value)
- def insert_para(self, index, value): self.para[index] = value
- def get_sect2(self): return self.sect2
- def set_sect2(self, sect2): self.sect2 = sect2
- def add_sect2(self, value): self.sect2.append(value)
- def insert_sect2(self, index, value): self.sect2[index] = value
- def export(self, outfile, level, namespace_='', name_='docInternalS1Type', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docInternalS1Type')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS1Type'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='docInternalS1Type'):
- for item_ in self.content_:
- item_.export(outfile, level, item_.name, namespace_)
- def hasContent_(self):
- if (
- self.para is not None or
- self.sect2 is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docInternalS1Type'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
- childobj_ = docParaType.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect2':
- childobj_ = docSect2Type.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'sect2', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
-# end class docInternalS1Type
-
-
-class docInternalS2Type(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if docInternalS2Type.subclass:
- return docInternalS2Type.subclass(*args_, **kwargs_)
- else:
- return docInternalS2Type(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_para(self): return self.para
- def set_para(self, para): self.para = para
- def add_para(self, value): self.para.append(value)
- def insert_para(self, index, value): self.para[index] = value
- def get_sect3(self): return self.sect3
- def set_sect3(self, sect3): self.sect3 = sect3
- def add_sect3(self, value): self.sect3.append(value)
- def insert_sect3(self, index, value): self.sect3[index] = value
- def export(self, outfile, level, namespace_='', name_='docInternalS2Type', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docInternalS2Type')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS2Type'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='docInternalS2Type'):
- for item_ in self.content_:
- item_.export(outfile, level, item_.name, namespace_)
- def hasContent_(self):
- if (
- self.para is not None or
- self.sect3 is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docInternalS2Type'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
- childobj_ = docParaType.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect3':
- childobj_ = docSect3Type.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'sect3', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
-# end class docInternalS2Type
-
-
-class docInternalS3Type(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, para=None, sect4=None, mixedclass_=None, content_=None):
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if docInternalS3Type.subclass:
- return docInternalS3Type.subclass(*args_, **kwargs_)
- else:
- return docInternalS3Type(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_para(self): return self.para
- def set_para(self, para): self.para = para
- def add_para(self, value): self.para.append(value)
- def insert_para(self, index, value): self.para[index] = value
- def get_sect4(self): return self.sect4
- def set_sect4(self, sect4): self.sect4 = sect4
- def add_sect4(self, value): self.sect4.append(value)
- def insert_sect4(self, index, value): self.sect4[index] = value
- def export(self, outfile, level, namespace_='', name_='docInternalS3Type', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docInternalS3Type')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS3Type'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='docInternalS3Type'):
- for item_ in self.content_:
- item_.export(outfile, level, item_.name, namespace_)
- def hasContent_(self):
- if (
- self.para is not None or
- self.sect4 is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docInternalS3Type'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
- childobj_ = docParaType.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect4':
- childobj_ = docSect4Type.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'sect4', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
-# end class docInternalS3Type
-
-
-class docInternalS4Type(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, para=None, mixedclass_=None, content_=None):
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if docInternalS4Type.subclass:
- return docInternalS4Type.subclass(*args_, **kwargs_)
- else:
- return docInternalS4Type(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_para(self): return self.para
- def set_para(self, para): self.para = para
- def add_para(self, value): self.para.append(value)
- def insert_para(self, index, value): self.para[index] = value
- def export(self, outfile, level, namespace_='', name_='docInternalS4Type', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docInternalS4Type')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS4Type'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='docInternalS4Type'):
- for item_ in self.content_:
- item_.export(outfile, level, item_.name, namespace_)
- def hasContent_(self):
- if (
- self.para is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docInternalS4Type'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
- childobj_ = docParaType.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'para', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
-# end class docInternalS4Type
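Each removed class also carries the same subclass/factory() hook: factory() returns cls.subclass when a wrapper module has registered one and the generated class otherwise, which is how hand-written code can extend these bindings without editing the generated file. A small self-contained sketch of the idea, with GeneratedBase and Custom as illustrative names only:

class GeneratedBase(object):
    subclass = None            # a wrapper module may install its own class here

    def __init__(self, para=None):
        self.para = para if para is not None else []

    @staticmethod
    def factory(*args, **kwargs):
        # Defer to the registered subclass when present.
        if GeneratedBase.subclass:
            return GeneratedBase.subclass(*args, **kwargs)
        return GeneratedBase(*args, **kwargs)

class Custom(GeneratedBase):
    def summary(self):
        return '%d paragraph(s)' % len(self.para)

GeneratedBase.subclass = Custom
obj = GeneratedBase.factory(para=['hello'])
print(type(obj).__name__, obj.summary())   # Custom 1 paragraph(s)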
-
-
-class docTitleType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, valueOf_='', mixedclass_=None, content_=None):
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if docTitleType.subclass:
- return docTitleType.subclass(*args_, **kwargs_)
- else:
- return docTitleType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='docTitleType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docTitleType')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='docTitleType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='docTitleType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docTitleType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class docTitleType
-
-
-class docParaType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, valueOf_='', mixedclass_=None, content_=None):
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if docParaType.subclass:
- return docParaType.subclass(*args_, **kwargs_)
- else:
- return docParaType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='docParaType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docParaType')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='docParaType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='docParaType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docParaType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class docParaType
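docTitleType and docParaType keep a flattened text mirror of the element in valueOf_: plain text nodes are appended as-is, while CDATA sections are stored behind a '![CDATA[' ... ']]' sentinel that exportChildren later rewrites into real '<![CDATA[' ... ']]>' markup instead of escaping it. A hedged sketch of that sentinel round trip, with stand-in helpers (store_value, emit_value) and xml.sax.saxutils.escape in place of the module's quote_xml:

from xml.sax.saxutils import escape

def store_value(text, is_cdata):
    # Mirrors how buildChildren accumulates valueOf_.
    return '![CDATA[' + text + ']]' if is_cdata else text

def emit_value(value):
    # Mirrors how exportChildren writes valueOf_ back out.
    if value.find('![CDATA') > -1:
        value = escape(value)                  # stand-in for quote_xml
        value = value.replace('![CDATA', '<![CDATA')
        value = value.replace(']]', ']]>')
        return value
    return escape(value)

print(emit_value(store_value('a < b', True)))   # <![CDATA[a &lt; b]]>
print(emit_value(store_value('a < b', False)))  # a &lt; b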
-
-
-class docMarkupType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, valueOf_='', mixedclass_=None, content_=None):
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if docMarkupType.subclass:
- return docMarkupType.subclass(*args_, **kwargs_)
- else:
- return docMarkupType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='docMarkupType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docMarkupType')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='docMarkupType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='docMarkupType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docMarkupType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class docMarkupType
-
-
-class docURLLink(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None):
- self.url = url
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if docURLLink.subclass:
- return docURLLink.subclass(*args_, **kwargs_)
- else:
- return docURLLink(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_url(self): return self.url
- def set_url(self, url): self.url = url
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='docURLLink', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docURLLink')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='docURLLink'):
- if self.url is not None:
- outfile.write(' url=%s' % (self.format_string(quote_attrib(self.url).encode(ExternalEncoding), input_name='url'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='docURLLink'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docURLLink'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.url is not None:
- showIndent(outfile, level)
- outfile.write('url = %s,\n' % (self.url,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('url'):
- self.url = attrs.get('url').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class docURLLink
-
-
-class docAnchorType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
- self.id = id
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if docAnchorType.subclass:
- return docAnchorType.subclass(*args_, **kwargs_)
- else:
- return docAnchorType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_id(self): return self.id
- def set_id(self, id): self.id = id
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='docAnchorType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docAnchorType')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='docAnchorType'):
- if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='docAnchorType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docAnchorType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.id is not None:
- showIndent(outfile, level)
- outfile.write('id = %s,\n' % (self.id,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('id'):
- self.id = attrs.get('id').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class docAnchorType
-
-
-class docFormulaType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
- self.id = id
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if docFormulaType.subclass:
- return docFormulaType.subclass(*args_, **kwargs_)
- else:
- return docFormulaType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_id(self): return self.id
- def set_id(self, id): self.id = id
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='docFormulaType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docFormulaType')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='docFormulaType'):
- if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='docFormulaType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docFormulaType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.id is not None:
- showIndent(outfile, level)
- outfile.write('id = %s,\n' % (self.id,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('id'):
- self.id = attrs.get('id').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class docFormulaType
-
-
-class docIndexEntryType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, primaryie=None, secondaryie=None):
- self.primaryie = primaryie
- self.secondaryie = secondaryie
- def factory(*args_, **kwargs_):
- if docIndexEntryType.subclass:
- return docIndexEntryType.subclass(*args_, **kwargs_)
- else:
- return docIndexEntryType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_primaryie(self): return self.primaryie
- def set_primaryie(self, primaryie): self.primaryie = primaryie
- def get_secondaryie(self): return self.secondaryie
- def set_secondaryie(self, secondaryie): self.secondaryie = secondaryie
- def export(self, outfile, level, namespace_='', name_='docIndexEntryType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docIndexEntryType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='docIndexEntryType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='docIndexEntryType'):
- if self.primaryie is not None:
- showIndent(outfile, level)
- outfile.write('<%sprimaryie>%s</%sprimaryie>\n' % (namespace_, self.format_string(quote_xml(self.primaryie).encode(ExternalEncoding), input_name='primaryie'), namespace_))
- if self.secondaryie is not None:
- showIndent(outfile, level)
- outfile.write('<%ssecondaryie>%s</%ssecondaryie>\n' % (namespace_, self.format_string(quote_xml(self.secondaryie).encode(ExternalEncoding), input_name='secondaryie'), namespace_))
- def hasContent_(self):
- if (
- self.primaryie is not None or
- self.secondaryie is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docIndexEntryType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('primaryie=%s,\n' % quote_python(self.primaryie).encode(ExternalEncoding))
- showIndent(outfile, level)
- outfile.write('secondaryie=%s,\n' % quote_python(self.secondaryie).encode(ExternalEncoding))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'primaryie':
- primaryie_ = ''
- for text__content_ in child_.childNodes:
- primaryie_ += text__content_.nodeValue
- self.primaryie = primaryie_
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'secondaryie':
- secondaryie_ = ''
- for text__content_ in child_.childNodes:
- secondaryie_ += text__content_.nodeValue
- self.secondaryie = secondaryie_
-# end class docIndexEntryType
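Unlike the mixed-content classes, docIndexEntryType stores primaryie and secondaryie as plain strings: buildChildren concatenates the nodeValue of the matching element's children instead of delegating to a nested factory. A standalone minidom sketch of that text-gathering step, with child_text as an illustrative helper (the None guard is an addition, not in the generated code):

from xml.dom import minidom, Node

def child_text(parent, tag):
    """Concatenate the text content of the first matching child element."""
    for child in parent.childNodes:
        if child.nodeType == Node.ELEMENT_NODE and child.nodeName.split(':')[-1] == tag:
            return ''.join(t.nodeValue for t in child.childNodes if t.nodeValue is not None)
    return None

doc = minidom.parseString('<indexentry><primaryie>alpha</primaryie>'
                          '<secondaryie>beta</secondaryie></indexentry>')
root = doc.documentElement
print(child_text(root, 'primaryie'), child_text(root, 'secondaryie'))  # alpha beta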
-
-
-class docListType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, listitem=None):
- if listitem is None:
- self.listitem = []
- else:
- self.listitem = listitem
- def factory(*args_, **kwargs_):
- if docListType.subclass:
- return docListType.subclass(*args_, **kwargs_)
- else:
- return docListType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_listitem(self): return self.listitem
- def set_listitem(self, listitem): self.listitem = listitem
- def add_listitem(self, value): self.listitem.append(value)
- def insert_listitem(self, index, value): self.listitem[index] = value
- def export(self, outfile, level, namespace_='', name_='docListType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docListType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='docListType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='docListType'):
- for listitem_ in self.listitem:
- listitem_.export(outfile, level, namespace_, name_='listitem')
- def hasContent_(self):
- if (
- self.listitem is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docListType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('listitem=[\n')
- level += 1
- for listitem in self.listitem:
- showIndent(outfile, level)
- outfile.write('model_.listitem(\n')
- listitem.exportLiteral(outfile, level, name_='listitem')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'listitem':
- obj_ = docListItemType.factory()
- obj_.build(child_)
- self.listitem.append(obj_)
-# end class docListType
-
-
-class docListItemType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, para=None):
- if para is None:
- self.para = []
- else:
- self.para = para
- def factory(*args_, **kwargs_):
- if docListItemType.subclass:
- return docListItemType.subclass(*args_, **kwargs_)
- else:
- return docListItemType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_para(self): return self.para
- def set_para(self, para): self.para = para
- def add_para(self, value): self.para.append(value)
- def insert_para(self, index, value): self.para[index] = value
- def export(self, outfile, level, namespace_='', name_='docListItemType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docListItemType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='docListItemType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='docListItemType'):
- for para_ in self.para:
- para_.export(outfile, level, namespace_, name_='para')
- def hasContent_(self):
- if (
- self.para is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docListItemType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('para=[\n')
- level += 1
- for para in self.para:
- showIndent(outfile, level)
- outfile.write('model_.para(\n')
- para.exportLiteral(outfile, level, name_='para')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
- obj_ = docParaType.factory()
- obj_.build(child_)
- self.para.append(obj_)
-# end class docListItemType
-
-
-class docSimpleSectType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, kind=None, title=None, para=None):
- self.kind = kind
- self.title = title
- if para is None:
- self.para = []
- else:
- self.para = para
- def factory(*args_, **kwargs_):
- if docSimpleSectType.subclass:
- return docSimpleSectType.subclass(*args_, **kwargs_)
- else:
- return docSimpleSectType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_title(self): return self.title
- def set_title(self, title): self.title = title
- def get_para(self): return self.para
- def set_para(self, para): self.para = para
- def add_para(self, value): self.para.append(value)
- def insert_para(self, index, value): self.para[index] = value
- def get_kind(self): return self.kind
- def set_kind(self, kind): self.kind = kind
- def export(self, outfile, level, namespace_='', name_='docSimpleSectType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docSimpleSectType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='docSimpleSectType'):
- if self.kind is not None:
- outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
- def exportChildren(self, outfile, level, namespace_='', name_='docSimpleSectType'):
- if self.title:
- self.title.export(outfile, level, namespace_, name_='title')
- for para_ in self.para:
- para_.export(outfile, level, namespace_, name_='para')
- def hasContent_(self):
- if (
- self.title is not None or
- self.para is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docSimpleSectType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.kind is not None:
- showIndent(outfile, level)
- outfile.write('kind = "%s",\n' % (self.kind,))
- def exportLiteralChildren(self, outfile, level, name_):
- if self.title:
- showIndent(outfile, level)
- outfile.write('title=model_.docTitleType(\n')
- self.title.exportLiteral(outfile, level, name_='title')
- showIndent(outfile, level)
- outfile.write('),\n')
- showIndent(outfile, level)
- outfile.write('para=[\n')
- level += 1
- for para in self.para:
- showIndent(outfile, level)
- outfile.write('model_.para(\n')
- para.exportLiteral(outfile, level, name_='para')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('kind'):
- self.kind = attrs.get('kind').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'title':
- obj_ = docTitleType.factory()
- obj_.build(child_)
- self.set_title(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
- obj_ = docParaType.factory()
- obj_.build(child_)
- self.para.append(obj_)
-# end class docSimpleSectType
-
-
-class docVarListEntryType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, term=None):
- self.term = term
- def factory(*args_, **kwargs_):
- if docVarListEntryType.subclass:
- return docVarListEntryType.subclass(*args_, **kwargs_)
- else:
- return docVarListEntryType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_term(self): return self.term
- def set_term(self, term): self.term = term
- def export(self, outfile, level, namespace_='', name_='docVarListEntryType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docVarListEntryType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='docVarListEntryType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='docVarListEntryType'):
- if self.term:
- self.term.export(outfile, level, namespace_, name_='term', )
- def hasContent_(self):
- if (
- self.term is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docVarListEntryType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- if self.term:
- showIndent(outfile, level)
- outfile.write('term=model_.docTitleType(\n')
- self.term.exportLiteral(outfile, level, name_='term')
- showIndent(outfile, level)
- outfile.write('),\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'term':
- obj_ = docTitleType.factory()
- obj_.build(child_)
- self.set_term(obj_)
-# end class docVarListEntryType
-
-
-class docVariableListType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, valueOf_=''):
- self.valueOf_ = valueOf_
- def factory(*args_, **kwargs_):
- if docVariableListType.subclass:
- return docVariableListType.subclass(*args_, **kwargs_)
- else:
- return docVariableListType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='docVariableListType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docVariableListType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='docVariableListType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='docVariableListType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docVariableListType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class docVariableListType
-
-
-class docRefTextType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
- self.refid = refid
- self.kindref = kindref
- self.external = external
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if docRefTextType.subclass:
- return docRefTextType.subclass(*args_, **kwargs_)
- else:
- return docRefTextType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_refid(self): return self.refid
- def set_refid(self, refid): self.refid = refid
- def get_kindref(self): return self.kindref
- def set_kindref(self, kindref): self.kindref = kindref
- def get_external(self): return self.external
- def set_external(self, external): self.external = external
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='docRefTextType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docRefTextType')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='docRefTextType'):
- if self.refid is not None:
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
- if self.kindref is not None:
- outfile.write(' kindref=%s' % (quote_attrib(self.kindref), ))
- if self.external is not None:
- outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='docRefTextType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docRefTextType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.refid is not None:
- showIndent(outfile, level)
- outfile.write('refid = %s,\n' % (self.refid,))
- if self.kindref is not None:
- showIndent(outfile, level)
- outfile.write('kindref = "%s",\n' % (self.kindref,))
- if self.external is not None:
- showIndent(outfile, level)
- outfile.write('external = %s,\n' % (self.external,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('refid'):
- self.refid = attrs.get('refid').value
- if attrs.get('kindref'):
- self.kindref = attrs.get('kindref').value
- if attrs.get('external'):
- self.external = attrs.get('external').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class docRefTextType
-
-
-class docTableType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, rows=None, cols=None, row=None, caption=None):
- self.rows = rows
- self.cols = cols
- if row is None:
- self.row = []
- else:
- self.row = row
- self.caption = caption
- def factory(*args_, **kwargs_):
- if docTableType.subclass:
- return docTableType.subclass(*args_, **kwargs_)
- else:
- return docTableType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_row(self): return self.row
- def set_row(self, row): self.row = row
- def add_row(self, value): self.row.append(value)
- def insert_row(self, index, value): self.row[index] = value
- def get_caption(self): return self.caption
- def set_caption(self, caption): self.caption = caption
- def get_rows(self): return self.rows
- def set_rows(self, rows): self.rows = rows
- def get_cols(self): return self.cols
- def set_cols(self, cols): self.cols = cols
- def export(self, outfile, level, namespace_='', name_='docTableType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docTableType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='docTableType'):
- if self.rows is not None:
- outfile.write(' rows="%s"' % self.format_integer(self.rows, input_name='rows'))
- if self.cols is not None:
- outfile.write(' cols="%s"' % self.format_integer(self.cols, input_name='cols'))
- def exportChildren(self, outfile, level, namespace_='', name_='docTableType'):
- for row_ in self.row:
- row_.export(outfile, level, namespace_, name_='row')
- if self.caption:
- self.caption.export(outfile, level, namespace_, name_='caption')
- def hasContent_(self):
- if (
- self.row is not None or
- self.caption is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docTableType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.rows is not None:
- showIndent(outfile, level)
- outfile.write('rows = %s,\n' % (self.rows,))
- if self.cols is not None:
- showIndent(outfile, level)
- outfile.write('cols = %s,\n' % (self.cols,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('row=[\n')
- level += 1
- for row in self.row:
- showIndent(outfile, level)
- outfile.write('model_.row(\n')
- row.exportLiteral(outfile, level, name_='row')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- if self.caption:
- showIndent(outfile, level)
- outfile.write('caption=model_.docCaptionType(\n')
- self.caption.exportLiteral(outfile, level, name_='caption')
- showIndent(outfile, level)
- outfile.write('),\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('rows'):
- try:
- self.rows = int(attrs.get('rows').value)
- except ValueError as exp:
- raise ValueError('Bad integer attribute (rows): %s' % exp)
- if attrs.get('cols'):
- try:
- self.cols = int(attrs.get('cols').value)
- except ValueError as exp:
- raise ValueError('Bad integer attribute (cols): %s' % exp)
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'row':
- obj_ = docRowType.factory()
- obj_.build(child_)
- self.row.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'caption':
- obj_ = docCaptionType.factory()
- obj_.build(child_)
- self.set_caption(obj_)
-# end class docTableType
-
-
-class docRowType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, entry=None):
- if entry is None:
- self.entry = []
- else:
- self.entry = entry
- def factory(*args_, **kwargs_):
- if docRowType.subclass:
- return docRowType.subclass(*args_, **kwargs_)
- else:
- return docRowType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_entry(self): return self.entry
- def set_entry(self, entry): self.entry = entry
- def add_entry(self, value): self.entry.append(value)
- def insert_entry(self, index, value): self.entry[index] = value
- def export(self, outfile, level, namespace_='', name_='docRowType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docRowType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='docRowType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='docRowType'):
- for entry_ in self.entry:
- entry_.export(outfile, level, namespace_, name_='entry')
- def hasContent_(self):
- if (
- self.entry is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docRowType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('entry=[\n')
- level += 1
- for entry in self.entry:
- showIndent(outfile, level)
- outfile.write('model_.entry(\n')
- entry.exportLiteral(outfile, level, name_='entry')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'entry':
- obj_ = docEntryType.factory()
- obj_.build(child_)
- self.entry.append(obj_)
-# end class docRowType
-
-
-class docEntryType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, thead=None, para=None):
- self.thead = thead
- if para is None:
- self.para = []
- else:
- self.para = para
- def factory(*args_, **kwargs_):
- if docEntryType.subclass:
- return docEntryType.subclass(*args_, **kwargs_)
- else:
- return docEntryType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_para(self): return self.para
- def set_para(self, para): self.para = para
- def add_para(self, value): self.para.append(value)
- def insert_para(self, index, value): self.para[index] = value
- def get_thead(self): return self.thead
- def set_thead(self, thead): self.thead = thead
- def export(self, outfile, level, namespace_='', name_='docEntryType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docEntryType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='docEntryType'):
- if self.thead is not None:
- outfile.write(' thead=%s' % (quote_attrib(self.thead), ))
- def exportChildren(self, outfile, level, namespace_='', name_='docEntryType'):
- for para_ in self.para:
- para_.export(outfile, level, namespace_, name_='para')
- def hasContent_(self):
- if (
- self.para is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docEntryType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.thead is not None:
- showIndent(outfile, level)
- outfile.write('thead = "%s",\n' % (self.thead,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('para=[\n')
- level += 1
- for para in self.para:
- showIndent(outfile, level)
- outfile.write('model_.para(\n')
- para.exportLiteral(outfile, level, name_='para')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('thead'):
- self.thead = attrs.get('thead').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
- obj_ = docParaType.factory()
- obj_.build(child_)
- self.para.append(obj_)
-# end class docEntryType
-
-
-class docCaptionType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, valueOf_='', mixedclass_=None, content_=None):
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if docCaptionType.subclass:
- return docCaptionType.subclass(*args_, **kwargs_)
- else:
- return docCaptionType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='docCaptionType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docCaptionType')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='docCaptionType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='docCaptionType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docCaptionType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class docCaptionType
-
-
-class docHeadingType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None):
- self.level = level
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if docHeadingType.subclass:
- return docHeadingType.subclass(*args_, **kwargs_)
- else:
- return docHeadingType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_level(self): return self.level
- def set_level(self, level): self.level = level
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='docHeadingType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docHeadingType')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='docHeadingType'):
- if self.level is not None:
- outfile.write(' level="%s"' % self.format_integer(self.level, input_name='level'))
- def exportChildren(self, outfile, level, namespace_='', name_='docHeadingType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docHeadingType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.level is not None:
- showIndent(outfile, level)
- outfile.write('level = %s,\n' % (self.level,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('level'):
- try:
- self.level = int(attrs.get('level').value)
- except ValueError as exp:
- raise ValueError('Bad integer attribute (level): %s' % exp)
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class docHeadingType
-
-
-class docImageType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None):
- self.width = width
- self.type_ = type_
- self.name = name
- self.height = height
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if docImageType.subclass:
- return docImageType.subclass(*args_, **kwargs_)
- else:
- return docImageType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_width(self): return self.width
- def set_width(self, width): self.width = width
- def get_type(self): return self.type_
- def set_type(self, type_): self.type_ = type_
- def get_name(self): return self.name
- def set_name(self, name): self.name = name
- def get_height(self): return self.height
- def set_height(self, height): self.height = height
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='docImageType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docImageType')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='docImageType'):
- if self.width is not None:
- outfile.write(' width=%s' % (self.format_string(quote_attrib(self.width).encode(ExternalEncoding), input_name='width'), ))
- if self.type_ is not None:
- outfile.write(' type=%s' % (quote_attrib(self.type_), ))
- if self.name is not None:
- outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
- if self.height is not None:
- outfile.write(' height=%s' % (self.format_string(quote_attrib(self.height).encode(ExternalEncoding), input_name='height'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='docImageType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docImageType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.width is not None:
- showIndent(outfile, level)
- outfile.write('width = %s,\n' % (self.width,))
- if self.type_ is not None:
- showIndent(outfile, level)
- outfile.write('type_ = "%s",\n' % (self.type_,))
- if self.name is not None:
- showIndent(outfile, level)
- outfile.write('name = %s,\n' % (self.name,))
- if self.height is not None:
- showIndent(outfile, level)
- outfile.write('height = %s,\n' % (self.height,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('width'):
- self.width = attrs.get('width').value
- if attrs.get('type'):
- self.type_ = attrs.get('type').value
- if attrs.get('name'):
- self.name = attrs.get('name').value
- if attrs.get('height'):
- self.height = attrs.get('height').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class docImageType
-
-
-class docDotFileType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None):
- self.name = name
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if docDotFileType.subclass:
- return docDotFileType.subclass(*args_, **kwargs_)
- else:
- return docDotFileType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_name(self): return self.name
- def set_name(self, name): self.name = name
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='docDotFileType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docDotFileType')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='docDotFileType'):
- if self.name is not None:
- outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='docDotFileType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docDotFileType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.name is not None:
- showIndent(outfile, level)
- outfile.write('name = %s,\n' % (self.name,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('name'):
- self.name = attrs.get('name').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class docDotFileType
-
-
-class docTocItemType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
- self.id = id
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if docTocItemType.subclass:
- return docTocItemType.subclass(*args_, **kwargs_)
- else:
- return docTocItemType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_id(self): return self.id
- def set_id(self, id): self.id = id
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='docTocItemType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docTocItemType')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='docTocItemType'):
- if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='docTocItemType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docTocItemType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.id is not None:
- showIndent(outfile, level)
- outfile.write('id = %s,\n' % (self.id,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('id'):
- self.id = attrs.get('id').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class docTocItemType
-
-
-class docTocListType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, tocitem=None):
- if tocitem is None:
- self.tocitem = []
- else:
- self.tocitem = tocitem
- def factory(*args_, **kwargs_):
- if docTocListType.subclass:
- return docTocListType.subclass(*args_, **kwargs_)
- else:
- return docTocListType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_tocitem(self): return self.tocitem
- def set_tocitem(self, tocitem): self.tocitem = tocitem
- def add_tocitem(self, value): self.tocitem.append(value)
- def insert_tocitem(self, index, value): self.tocitem[index] = value
- def export(self, outfile, level, namespace_='', name_='docTocListType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docTocListType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='docTocListType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='docTocListType'):
- for tocitem_ in self.tocitem:
- tocitem_.export(outfile, level, namespace_, name_='tocitem')
- def hasContent_(self):
- if (
- self.tocitem is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docTocListType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('tocitem=[\n')
- level += 1
- for tocitem in self.tocitem:
- showIndent(outfile, level)
- outfile.write('model_.tocitem(\n')
- tocitem.exportLiteral(outfile, level, name_='tocitem')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'tocitem':
- obj_ = docTocItemType.factory()
- obj_.build(child_)
- self.tocitem.append(obj_)
-# end class docTocListType
-
-
-class docLanguageType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, langid=None, para=None):
- self.langid = langid
- if para is None:
- self.para = []
- else:
- self.para = para
- def factory(*args_, **kwargs_):
- if docLanguageType.subclass:
- return docLanguageType.subclass(*args_, **kwargs_)
- else:
- return docLanguageType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_para(self): return self.para
- def set_para(self, para): self.para = para
- def add_para(self, value): self.para.append(value)
- def insert_para(self, index, value): self.para[index] = value
- def get_langid(self): return self.langid
- def set_langid(self, langid): self.langid = langid
- def export(self, outfile, level, namespace_='', name_='docLanguageType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docLanguageType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='docLanguageType'):
- if self.langid is not None:
- outfile.write(' langid=%s' % (self.format_string(quote_attrib(self.langid).encode(ExternalEncoding), input_name='langid'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='docLanguageType'):
- for para_ in self.para:
- para_.export(outfile, level, namespace_, name_='para')
- def hasContent_(self):
- if (
- self.para is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docLanguageType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.langid is not None:
- showIndent(outfile, level)
- outfile.write('langid = %s,\n' % (self.langid,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('para=[\n')
- level += 1
- for para in self.para:
- showIndent(outfile, level)
- outfile.write('model_.para(\n')
- para.exportLiteral(outfile, level, name_='para')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('langid'):
- self.langid = attrs.get('langid').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
- obj_ = docParaType.factory()
- obj_.build(child_)
- self.para.append(obj_)
-# end class docLanguageType
-
-
-class docParamListType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, kind=None, parameteritem=None):
- self.kind = kind
- if parameteritem is None:
- self.parameteritem = []
- else:
- self.parameteritem = parameteritem
- def factory(*args_, **kwargs_):
- if docParamListType.subclass:
- return docParamListType.subclass(*args_, **kwargs_)
- else:
- return docParamListType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_parameteritem(self): return self.parameteritem
- def set_parameteritem(self, parameteritem): self.parameteritem = parameteritem
- def add_parameteritem(self, value): self.parameteritem.append(value)
- def insert_parameteritem(self, index, value): self.parameteritem[index] = value
- def get_kind(self): return self.kind
- def set_kind(self, kind): self.kind = kind
- def export(self, outfile, level, namespace_='', name_='docParamListType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docParamListType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='docParamListType'):
- if self.kind is not None:
- outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
- def exportChildren(self, outfile, level, namespace_='', name_='docParamListType'):
- for parameteritem_ in self.parameteritem:
- parameteritem_.export(outfile, level, namespace_, name_='parameteritem')
- def hasContent_(self):
- if (
- self.parameteritem is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docParamListType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.kind is not None:
- showIndent(outfile, level)
- outfile.write('kind = "%s",\n' % (self.kind,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('parameteritem=[\n')
- level += 1
- for parameteritem in self.parameteritem:
- showIndent(outfile, level)
- outfile.write('model_.parameteritem(\n')
- parameteritem.exportLiteral(outfile, level, name_='parameteritem')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('kind'):
- self.kind = attrs.get('kind').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'parameteritem':
- obj_ = docParamListItem.factory()
- obj_.build(child_)
- self.parameteritem.append(obj_)
-# end class docParamListType
-
-
-class docParamListItem(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, parameternamelist=None, parameterdescription=None):
- if parameternamelist is None:
- self.parameternamelist = []
- else:
- self.parameternamelist = parameternamelist
- self.parameterdescription = parameterdescription
- def factory(*args_, **kwargs_):
- if docParamListItem.subclass:
- return docParamListItem.subclass(*args_, **kwargs_)
- else:
- return docParamListItem(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_parameternamelist(self): return self.parameternamelist
- def set_parameternamelist(self, parameternamelist): self.parameternamelist = parameternamelist
- def add_parameternamelist(self, value): self.parameternamelist.append(value)
- def insert_parameternamelist(self, index, value): self.parameternamelist[index] = value
- def get_parameterdescription(self): return self.parameterdescription
- def set_parameterdescription(self, parameterdescription): self.parameterdescription = parameterdescription
- def export(self, outfile, level, namespace_='', name_='docParamListItem', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docParamListItem')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='docParamListItem'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='docParamListItem'):
- for parameternamelist_ in self.parameternamelist:
- parameternamelist_.export(outfile, level, namespace_, name_='parameternamelist')
- if self.parameterdescription:
- self.parameterdescription.export(outfile, level, namespace_, name_='parameterdescription', )
- def hasContent_(self):
- if (
- self.parameternamelist is not None or
- self.parameterdescription is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docParamListItem'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('parameternamelist=[\n')
- level += 1
- for parameternamelist in self.parameternamelist:
- showIndent(outfile, level)
- outfile.write('model_.parameternamelist(\n')
- parameternamelist.exportLiteral(outfile, level, name_='parameternamelist')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- if self.parameterdescription:
- showIndent(outfile, level)
- outfile.write('parameterdescription=model_.descriptionType(\n')
- self.parameterdescription.exportLiteral(outfile, level, name_='parameterdescription')
- showIndent(outfile, level)
- outfile.write('),\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'parameternamelist':
- obj_ = docParamNameList.factory()
- obj_.build(child_)
- self.parameternamelist.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'parameterdescription':
- obj_ = descriptionType.factory()
- obj_.build(child_)
- self.set_parameterdescription(obj_)
-# end class docParamListItem
-
-
-class docParamNameList(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, parametername=None):
- if parametername is None:
- self.parametername = []
- else:
- self.parametername = parametername
- def factory(*args_, **kwargs_):
- if docParamNameList.subclass:
- return docParamNameList.subclass(*args_, **kwargs_)
- else:
- return docParamNameList(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_parametername(self): return self.parametername
- def set_parametername(self, parametername): self.parametername = parametername
- def add_parametername(self, value): self.parametername.append(value)
- def insert_parametername(self, index, value): self.parametername[index] = value
- def export(self, outfile, level, namespace_='', name_='docParamNameList', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docParamNameList')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='docParamNameList'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='docParamNameList'):
- for parametername_ in self.parametername:
- parametername_.export(outfile, level, namespace_, name_='parametername')
- def hasContent_(self):
- if (
- self.parametername is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docParamNameList'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('parametername=[\n')
- level += 1
- for parametername in self.parametername:
- showIndent(outfile, level)
- outfile.write('model_.parametername(\n')
- parametername.exportLiteral(outfile, level, name_='parametername')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'parametername':
- obj_ = docParamName.factory()
- obj_.build(child_)
- self.parametername.append(obj_)
-# end class docParamNameList
-
-
-class docParamName(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None):
- self.direction = direction
- if mixedclass_ is None:
- self.mixedclass_ = MixedContainer
- else:
- self.mixedclass_ = mixedclass_
- if content_ is None:
- self.content_ = []
- else:
- self.content_ = content_
- def factory(*args_, **kwargs_):
- if docParamName.subclass:
- return docParamName.subclass(*args_, **kwargs_)
- else:
- return docParamName(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_ref(self): return self.ref
- def set_ref(self, ref): self.ref = ref
- def get_direction(self): return self.direction
- def set_direction(self, direction): self.direction = direction
- def export(self, outfile, level, namespace_='', name_='docParamName', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docParamName')
- outfile.write('>')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- def exportAttributes(self, outfile, level, namespace_='', name_='docParamName'):
- if self.direction is not None:
- outfile.write(' direction=%s' % (quote_attrib(self.direction), ))
- def exportChildren(self, outfile, level, namespace_='', name_='docParamName'):
- for item_ in self.content_:
- item_.export(outfile, level, item_.name, namespace_)
- def hasContent_(self):
- if (
- self.ref is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docParamName'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.direction is not None:
- showIndent(outfile, level)
- outfile.write('direction = "%s",\n' % (self.direction,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('content_ = [\n')
- for item_ in self.content_:
- item_.exportLiteral(outfile, level, name_)
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('direction'):
- self.direction = attrs.get('direction').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'ref':
- childobj_ = docRefTextType.factory()
- childobj_.build(child_)
- obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
- MixedContainer.TypeNone, 'ref', childobj_)
- self.content_.append(obj_)
- elif child_.nodeType == Node.TEXT_NODE:
- obj_ = self.mixedclass_(MixedContainer.CategoryText,
- MixedContainer.TypeNone, '', child_.nodeValue)
- self.content_.append(obj_)
-# end class docParamName
-
-
-class docXRefSectType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, id=None, xreftitle=None, xrefdescription=None):
- self.id = id
- if xreftitle is None:
- self.xreftitle = []
- else:
- self.xreftitle = xreftitle
- self.xrefdescription = xrefdescription
- def factory(*args_, **kwargs_):
- if docXRefSectType.subclass:
- return docXRefSectType.subclass(*args_, **kwargs_)
- else:
- return docXRefSectType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_xreftitle(self): return self.xreftitle
- def set_xreftitle(self, xreftitle): self.xreftitle = xreftitle
- def add_xreftitle(self, value): self.xreftitle.append(value)
- def insert_xreftitle(self, index, value): self.xreftitle[index] = value
- def get_xrefdescription(self): return self.xrefdescription
- def set_xrefdescription(self, xrefdescription): self.xrefdescription = xrefdescription
- def get_id(self): return self.id
- def set_id(self, id): self.id = id
- def export(self, outfile, level, namespace_='', name_='docXRefSectType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docXRefSectType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='docXRefSectType'):
- if self.id is not None:
- outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='docXRefSectType'):
- for xreftitle_ in self.xreftitle:
- showIndent(outfile, level)
- outfile.write('<%sxreftitle>%s</%sxreftitle>\n' % (namespace_, self.format_string(quote_xml(xreftitle_).encode(ExternalEncoding), input_name='xreftitle'), namespace_))
- if self.xrefdescription:
- self.xrefdescription.export(outfile, level, namespace_, name_='xrefdescription', )
- def hasContent_(self):
- if (
- self.xreftitle is not None or
- self.xrefdescription is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docXRefSectType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.id is not None:
- showIndent(outfile, level)
- outfile.write('id = %s,\n' % (self.id,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('xreftitle=[\n')
- level += 1
- for xreftitle in self.xreftitle:
- showIndent(outfile, level)
- outfile.write('%s,\n' % quote_python(xreftitle).encode(ExternalEncoding))
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- if self.xrefdescription:
- showIndent(outfile, level)
- outfile.write('xrefdescription=model_.descriptionType(\n')
- self.xrefdescription.exportLiteral(outfile, level, name_='xrefdescription')
- showIndent(outfile, level)
- outfile.write('),\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('id'):
- self.id = attrs.get('id').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'xreftitle':
- xreftitle_ = ''
- for text__content_ in child_.childNodes:
- xreftitle_ += text__content_.nodeValue
- self.xreftitle.append(xreftitle_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'xrefdescription':
- obj_ = descriptionType.factory()
- obj_.build(child_)
- self.set_xrefdescription(obj_)
-# end class docXRefSectType
-
-
-class docCopyType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, link=None, para=None, sect1=None, internal=None):
- self.link = link
- if para is None:
- self.para = []
- else:
- self.para = para
- if sect1 is None:
- self.sect1 = []
- else:
- self.sect1 = sect1
- self.internal = internal
- def factory(*args_, **kwargs_):
- if docCopyType.subclass:
- return docCopyType.subclass(*args_, **kwargs_)
- else:
- return docCopyType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_para(self): return self.para
- def set_para(self, para): self.para = para
- def add_para(self, value): self.para.append(value)
- def insert_para(self, index, value): self.para[index] = value
- def get_sect1(self): return self.sect1
- def set_sect1(self, sect1): self.sect1 = sect1
- def add_sect1(self, value): self.sect1.append(value)
- def insert_sect1(self, index, value): self.sect1[index] = value
- def get_internal(self): return self.internal
- def set_internal(self, internal): self.internal = internal
- def get_link(self): return self.link
- def set_link(self, link): self.link = link
- def export(self, outfile, level, namespace_='', name_='docCopyType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docCopyType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='docCopyType'):
- if self.link is not None:
- outfile.write(' link=%s' % (self.format_string(quote_attrib(self.link).encode(ExternalEncoding), input_name='link'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='docCopyType'):
- for para_ in self.para:
- para_.export(outfile, level, namespace_, name_='para')
- for sect1_ in self.sect1:
- sect1_.export(outfile, level, namespace_, name_='sect1')
- if self.internal:
- self.internal.export(outfile, level, namespace_, name_='internal')
- def hasContent_(self):
- if (
- self.para is not None or
- self.sect1 is not None or
- self.internal is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docCopyType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.link is not None:
- showIndent(outfile, level)
- outfile.write('link = %s,\n' % (self.link,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('para=[\n')
- level += 1
- for para in self.para:
- showIndent(outfile, level)
- outfile.write('model_.para(\n')
- para.exportLiteral(outfile, level, name_='para')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- showIndent(outfile, level)
- outfile.write('sect1=[\n')
- level += 1
- for sect1 in self.sect1:
- showIndent(outfile, level)
- outfile.write('model_.sect1(\n')
- sect1.exportLiteral(outfile, level, name_='sect1')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- if self.internal:
- showIndent(outfile, level)
- outfile.write('internal=model_.docInternalType(\n')
- self.internal.exportLiteral(outfile, level, name_='internal')
- showIndent(outfile, level)
- outfile.write('),\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('link'):
- self.link = attrs.get('link').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'para':
- obj_ = docParaType.factory()
- obj_.build(child_)
- self.para.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'sect1':
- obj_ = docSect1Type.factory()
- obj_.build(child_)
- self.sect1.append(obj_)
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'internal':
- obj_ = docInternalType.factory()
- obj_.build(child_)
- self.set_internal(obj_)
-# end class docCopyType
-
-
-class docCharType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, char=None, valueOf_=''):
- self.char = char
- self.valueOf_ = valueOf_
- def factory(*args_, **kwargs_):
- if docCharType.subclass:
- return docCharType.subclass(*args_, **kwargs_)
- else:
- return docCharType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_char(self): return self.char
- def set_char(self, char): self.char = char
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='docCharType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docCharType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='docCharType'):
- if self.char is not None:
- outfile.write(' char=%s' % (quote_attrib(self.char), ))
- def exportChildren(self, outfile, level, namespace_='', name_='docCharType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docCharType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.char is not None:
- showIndent(outfile, level)
- outfile.write('char = "%s",\n' % (self.char,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('char'):
- self.char = attrs.get('char').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class docCharType
-
-
-class docEmptyType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, valueOf_=''):
- self.valueOf_ = valueOf_
- def factory(*args_, **kwargs_):
- if docEmptyType.subclass:
- return docEmptyType.subclass(*args_, **kwargs_)
- else:
- return docEmptyType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def getValueOf_(self): return self.valueOf_
- def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
- def export(self, outfile, level, namespace_='', name_='docEmptyType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='docEmptyType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='docEmptyType'):
- pass
- def exportChildren(self, outfile, level, namespace_='', name_='docEmptyType'):
- if self.valueOf_.find('![CDATA')>-1:
- value=quote_xml('%s' % self.valueOf_)
- value=value.replace('![CDATA','<![CDATA')
- value=value.replace(']]',']]>')
- outfile.write(value)
- else:
- outfile.write(quote_xml('%s' % self.valueOf_))
- def hasContent_(self):
- if (
- self.valueOf_ is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='docEmptyType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- pass
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- self.valueOf_ = ''
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- pass
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.TEXT_NODE:
- self.valueOf_ += child_.nodeValue
- elif child_.nodeType == Node.CDATA_SECTION_NODE:
- self.valueOf_ += '![CDATA['+child_.nodeValue+']]'
-# end class docEmptyType
-
-
-USAGE_TEXT = """
-Usage: python <Parser>.py [ -s ] <in_xml_file>
-Options:
- -s Use the SAX parser, not the minidom parser.
-"""
-
-def usage():
- print(USAGE_TEXT)
- sys.exit(1)
-
-
-def parse(inFileName):
- doc = minidom.parse(inFileName)
- rootNode = doc.documentElement
- rootObj = DoxygenType.factory()
- rootObj.build(rootNode)
- # Enable Python to collect the space used by the DOM.
- doc = None
- sys.stdout.write('<?xml version="1.0" ?>\n')
- rootObj.export(sys.stdout, 0, name_="doxygen",
- namespacedef_='')
- return rootObj
-
-
-def parseString(inString):
- doc = minidom.parseString(inString)
- rootNode = doc.documentElement
- rootObj = DoxygenType.factory()
- rootObj.build(rootNode)
- # Enable Python to collect the space used by the DOM.
- doc = None
- sys.stdout.write('<?xml version="1.0" ?>\n')
- rootObj.export(sys.stdout, 0, name_="doxygen",
- namespacedef_='')
- return rootObj
-
-
-def parseLiteral(inFileName):
- doc = minidom.parse(inFileName)
- rootNode = doc.documentElement
- rootObj = DoxygenType.factory()
- rootObj.build(rootNode)
- # Enable Python to collect the space used by the DOM.
- doc = None
- sys.stdout.write('from compound import *\n\n')
- sys.stdout.write('rootObj = doxygen(\n')
- rootObj.exportLiteral(sys.stdout, 0, name_="doxygen")
- sys.stdout.write(')\n')
- return rootObj
-
-
-def main():
- args = sys.argv[1:]
- if len(args) == 1:
- parse(args[0])
- else:
- usage()
-
-
-if __name__ == '__main__':
- main()
- #import pdb
- #pdb.run('main()')
diff --git a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/generated/index.py b/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/generated/index.py
deleted file mode 100644
index 0c63512119..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/generated/index.py
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Generated Mon Feb 9 19:08:05 2009 by generateDS.py.
-"""
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-from xml.dom import minidom
-
-import os
-import sys
-from . import compound
-
-from . import indexsuper as supermod
-
-class DoxygenTypeSub(supermod.DoxygenType):
- def __init__(self, version=None, compound=None):
- supermod.DoxygenType.__init__(self, version, compound)
-
- def find_compounds_and_members(self, details):
- """
- Returns a list of all compounds and their members which match details
- """
-
- results = []
- for compound in self.compound:
- members = compound.find_members(details)
- if members:
- results.append([compound, members])
- else:
- if details.match(compound):
- results.append([compound, []])
-
- return results
-
-supermod.DoxygenType.subclass = DoxygenTypeSub
-# end class DoxygenTypeSub
-
-
-class CompoundTypeSub(supermod.CompoundType):
- def __init__(self, kind=None, refid=None, name='', member=None):
- supermod.CompoundType.__init__(self, kind, refid, name, member)
-
- def find_members(self, details):
- """
- Returns a list of all members which match details
- """
-
- results = []
-
- for member in self.member:
- if details.match(member):
- results.append(member)
-
- return results
-
-supermod.CompoundType.subclass = CompoundTypeSub
-# end class CompoundTypeSub
-
-
-class MemberTypeSub(supermod.MemberType):
-
- def __init__(self, kind=None, refid=None, name=''):
- supermod.MemberType.__init__(self, kind, refid, name)
-
-supermod.MemberType.subclass = MemberTypeSub
-# end class MemberTypeSub
-
-
-def parse(inFilename):
-
- doc = minidom.parse(inFilename)
- rootNode = doc.documentElement
- rootObj = supermod.DoxygenType.factory()
- rootObj.build(rootNode)
-
- return rootObj
-
diff --git a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/generated/indexsuper.py b/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/generated/indexsuper.py
deleted file mode 100644
index 11312db635..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/generated/indexsuper.py
+++ /dev/null
@@ -1,526 +0,0 @@
-#!/usr/bin/env python
-
-#
-# Generated Thu Jun 11 18:43:54 2009 by generateDS.py.
-#
-
-from __future__ import print_function
-from __future__ import unicode_literals
-
-import sys
-
-from xml.dom import minidom
-from xml.dom import Node
-
-import six
-
-#
-# User methods
-#
-# Calls to the methods in these classes are generated by generateDS.py.
-# You can replace these methods by re-implementing the following class
-# in a module named generatedssuper.py.
-
-try:
- from generatedssuper import GeneratedsSuper
-except ImportError as exp:
-
- class GeneratedsSuper(object):
- def format_string(self, input_data, input_name=''):
- return input_data
- def format_integer(self, input_data, input_name=''):
- return '%d' % input_data
- def format_float(self, input_data, input_name=''):
- return '%f' % input_data
- def format_double(self, input_data, input_name=''):
- return '%e' % input_data
- def format_boolean(self, input_data, input_name=''):
- return '%s' % input_data
-
-
-#
-# If you have installed IPython you can uncomment and use the following.
-# IPython is available from http://ipython.scipy.org/.
-#
-
-## from IPython.Shell import IPShellEmbed
-## args = ''
-## ipshell = IPShellEmbed(args,
-## banner = 'Dropping into IPython',
-## exit_msg = 'Leaving Interpreter, back to program.')
-
-# Then use the following line where and when you want to drop into the
-# IPython shell:
-# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
-
-#
-# Globals
-#
-
-ExternalEncoding = 'ascii'
-
-#
-# Support/utility functions.
-#
-
-def showIndent(outfile, level):
- for idx in range(level):
- outfile.write(' ')
-
-def quote_xml(inStr):
- s1 = (isinstance(inStr, six.string_types) and inStr or
- '%s' % inStr)
- s1 = s1.replace('&', '&amp;')
- s1 = s1.replace('<', '&lt;')
- s1 = s1.replace('>', '&gt;')
- return s1
-
-def quote_attrib(inStr):
- s1 = (isinstance(inStr, six.string_types) and inStr or
- '%s' % inStr)
- s1 = s1.replace('&', '&amp;')
- s1 = s1.replace('<', '&lt;')
- s1 = s1.replace('>', '&gt;')
- if '"' in s1:
- if "'" in s1:
- s1 = '"%s"' % s1.replace('"', "&quot;")
- else:
- s1 = "'%s'" % s1
- else:
- s1 = '"%s"' % s1
- return s1
-
-def quote_python(inStr):
- s1 = inStr
- if s1.find("'") == -1:
- if s1.find('\n') == -1:
- return "'%s'" % s1
- else:
- return "'''%s'''" % s1
- else:
- if s1.find('"') != -1:
- s1 = s1.replace('"', '\\"')
- if s1.find('\n') == -1:
- return '"%s"' % s1
- else:
- return '"""%s"""' % s1
-
-
-class MixedContainer(object):
- # Constants for category:
- CategoryNone = 0
- CategoryText = 1
- CategorySimple = 2
- CategoryComplex = 3
- # Constants for content_type:
- TypeNone = 0
- TypeText = 1
- TypeString = 2
- TypeInteger = 3
- TypeFloat = 4
- TypeDecimal = 5
- TypeDouble = 6
- TypeBoolean = 7
- def __init__(self, category, content_type, name, value):
- self.category = category
- self.content_type = content_type
- self.name = name
- self.value = value
- def getCategory(self):
- return self.category
- def getContenttype(self, content_type):
- return self.content_type
- def getValue(self):
- return self.value
- def getName(self):
- return self.name
- def export(self, outfile, level, name, namespace):
- if self.category == MixedContainer.CategoryText:
- outfile.write(self.value)
- elif self.category == MixedContainer.CategorySimple:
- self.exportSimple(outfile, level, name)
- else: # category == MixedContainer.CategoryComplex
- self.value.export(outfile, level, namespace,name)
- def exportSimple(self, outfile, level, name):
- if self.content_type == MixedContainer.TypeString:
- outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
- elif self.content_type == MixedContainer.TypeInteger or \
- self.content_type == MixedContainer.TypeBoolean:
- outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
- elif self.content_type == MixedContainer.TypeFloat or \
- self.content_type == MixedContainer.TypeDecimal:
- outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
- elif self.content_type == MixedContainer.TypeDouble:
- outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
- def exportLiteral(self, outfile, level, name):
- if self.category == MixedContainer.CategoryText:
- showIndent(outfile, level)
- outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
- (self.category, self.content_type, self.name, self.value))
- elif self.category == MixedContainer.CategorySimple:
- showIndent(outfile, level)
- outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
- (self.category, self.content_type, self.name, self.value))
- else: # category == MixedContainer.CategoryComplex
- showIndent(outfile, level)
- outfile.write('MixedContainer(%d, %d, "%s",\n' % \
- (self.category, self.content_type, self.name,))
- self.value.exportLiteral(outfile, level + 1)
- showIndent(outfile, level)
- outfile.write(')\n')
-
-
-class _MemberSpec(object):
- def __init__(self, name='', data_type='', container=0):
- self.name = name
- self.data_type = data_type
- self.container = container
- def set_name(self, name): self.name = name
- def get_name(self): return self.name
- def set_data_type(self, data_type): self.data_type = data_type
- def get_data_type(self): return self.data_type
- def set_container(self, container): self.container = container
- def get_container(self): return self.container
-
-
-#
-# Data representation classes.
-#
-
-class DoxygenType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, version=None, compound=None):
- self.version = version
- if compound is None:
- self.compound = []
- else:
- self.compound = compound
- def factory(*args_, **kwargs_):
- if DoxygenType.subclass:
- return DoxygenType.subclass(*args_, **kwargs_)
- else:
- return DoxygenType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_compound(self): return self.compound
- def set_compound(self, compound): self.compound = compound
- def add_compound(self, value): self.compound.append(value)
- def insert_compound(self, index, value): self.compound[index] = value
- def get_version(self): return self.version
- def set_version(self, version): self.version = version
- def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='DoxygenType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='DoxygenType'):
- outfile.write(' version=%s' % (self.format_string(quote_attrib(self.version).encode(ExternalEncoding), input_name='version'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='DoxygenType'):
- for compound_ in self.compound:
- compound_.export(outfile, level, namespace_, name_='compound')
- def hasContent_(self):
- if (
- self.compound is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='DoxygenType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.version is not None:
- showIndent(outfile, level)
- outfile.write('version = %s,\n' % (self.version,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('compound=[\n')
- level += 1
- for compound in self.compound:
- showIndent(outfile, level)
- outfile.write('model_.compound(\n')
- compound.exportLiteral(outfile, level, name_='compound')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('version'):
- self.version = attrs.get('version').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'compound':
- obj_ = CompoundType.factory()
- obj_.build(child_)
- self.compound.append(obj_)
-# end class DoxygenType
-
-
-class CompoundType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, kind=None, refid=None, name=None, member=None):
- self.kind = kind
- self.refid = refid
- self.name = name
- if member is None:
- self.member = []
- else:
- self.member = member
- def factory(*args_, **kwargs_):
- if CompoundType.subclass:
- return CompoundType.subclass(*args_, **kwargs_)
- else:
- return CompoundType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_name(self): return self.name
- def set_name(self, name): self.name = name
- def get_member(self): return self.member
- def set_member(self, member): self.member = member
- def add_member(self, value): self.member.append(value)
- def insert_member(self, index, value): self.member[index] = value
- def get_kind(self): return self.kind
- def set_kind(self, kind): self.kind = kind
- def get_refid(self): return self.refid
- def set_refid(self, refid): self.refid = refid
- def export(self, outfile, level, namespace_='', name_='CompoundType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='CompoundType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='CompoundType'):
- outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='CompoundType'):
- if self.name is not None:
- showIndent(outfile, level)
- outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
- for member_ in self.member:
- member_.export(outfile, level, namespace_, name_='member')
- def hasContent_(self):
- if (
- self.name is not None or
- self.member is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='CompoundType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.kind is not None:
- showIndent(outfile, level)
- outfile.write('kind = "%s",\n' % (self.kind,))
- if self.refid is not None:
- showIndent(outfile, level)
- outfile.write('refid = %s,\n' % (self.refid,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
- showIndent(outfile, level)
- outfile.write('member=[\n')
- level += 1
- for member in self.member:
- showIndent(outfile, level)
- outfile.write('model_.member(\n')
- member.exportLiteral(outfile, level, name_='member')
- showIndent(outfile, level)
- outfile.write('),\n')
- level -= 1
- showIndent(outfile, level)
- outfile.write('],\n')
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('kind'):
- self.kind = attrs.get('kind').value
- if attrs.get('refid'):
- self.refid = attrs.get('refid').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'name':
- name_ = ''
- for text__content_ in child_.childNodes:
- name_ += text__content_.nodeValue
- self.name = name_
- elif child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'member':
- obj_ = MemberType.factory()
- obj_.build(child_)
- self.member.append(obj_)
-# end class CompoundType
-
-
-class MemberType(GeneratedsSuper):
- subclass = None
- superclass = None
- def __init__(self, kind=None, refid=None, name=None):
- self.kind = kind
- self.refid = refid
- self.name = name
- def factory(*args_, **kwargs_):
- if MemberType.subclass:
- return MemberType.subclass(*args_, **kwargs_)
- else:
- return MemberType(*args_, **kwargs_)
- factory = staticmethod(factory)
- def get_name(self): return self.name
- def set_name(self, name): self.name = name
- def get_kind(self): return self.kind
- def set_kind(self, kind): self.kind = kind
- def get_refid(self): return self.refid
- def set_refid(self, refid): self.refid = refid
- def export(self, outfile, level, namespace_='', name_='MemberType', namespacedef_=''):
- showIndent(outfile, level)
- outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
- self.exportAttributes(outfile, level, namespace_, name_='MemberType')
- if self.hasContent_():
- outfile.write('>\n')
- self.exportChildren(outfile, level + 1, namespace_, name_)
- showIndent(outfile, level)
- outfile.write('</%s%s>\n' % (namespace_, name_))
- else:
- outfile.write(' />\n')
- def exportAttributes(self, outfile, level, namespace_='', name_='MemberType'):
- outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
- outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
- def exportChildren(self, outfile, level, namespace_='', name_='MemberType'):
- if self.name is not None:
- showIndent(outfile, level)
- outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
- def hasContent_(self):
- if (
- self.name is not None
- ):
- return True
- else:
- return False
- def exportLiteral(self, outfile, level, name_='MemberType'):
- level += 1
- self.exportLiteralAttributes(outfile, level, name_)
- if self.hasContent_():
- self.exportLiteralChildren(outfile, level, name_)
- def exportLiteralAttributes(self, outfile, level, name_):
- if self.kind is not None:
- showIndent(outfile, level)
- outfile.write('kind = "%s",\n' % (self.kind,))
- if self.refid is not None:
- showIndent(outfile, level)
- outfile.write('refid = %s,\n' % (self.refid,))
- def exportLiteralChildren(self, outfile, level, name_):
- showIndent(outfile, level)
- outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
- def build(self, node_):
- attrs = node_.attributes
- self.buildAttributes(attrs)
- for child_ in node_.childNodes:
- nodeName_ = child_.nodeName.split(':')[-1]
- self.buildChildren(child_, nodeName_)
- def buildAttributes(self, attrs):
- if attrs.get('kind'):
- self.kind = attrs.get('kind').value
- if attrs.get('refid'):
- self.refid = attrs.get('refid').value
- def buildChildren(self, child_, nodeName_):
- if child_.nodeType == Node.ELEMENT_NODE and \
- nodeName_ == 'name':
- name_ = ''
- for text__content_ in child_.childNodes:
- name_ += text__content_.nodeValue
- self.name = name_
-# end class MemberType
-
-
-USAGE_TEXT = """
-Usage: python <Parser>.py [ -s ] <in_xml_file>
-Options:
- -s Use the SAX parser, not the minidom parser.
-"""
-
-def usage():
- print(USAGE_TEXT)
- sys.exit(1)
-
-
-def parse(inFileName):
- doc = minidom.parse(inFileName)
- rootNode = doc.documentElement
- rootObj = DoxygenType.factory()
- rootObj.build(rootNode)
- # Enable Python to collect the space used by the DOM.
- doc = None
- sys.stdout.write('<?xml version="1.0" ?>\n')
- rootObj.export(sys.stdout, 0, name_="doxygenindex",
- namespacedef_='')
- return rootObj
-
-
-def parseString(inString):
- doc = minidom.parseString(inString)
- rootNode = doc.documentElement
- rootObj = DoxygenType.factory()
- rootObj.build(rootNode)
- # Enable Python to collect the space used by the DOM.
- doc = None
- sys.stdout.write('<?xml version="1.0" ?>\n')
- rootObj.export(sys.stdout, 0, name_="doxygenindex",
- namespacedef_='')
- return rootObj
-
-
-def parseLiteral(inFileName):
- doc = minidom.parse(inFileName)
- rootNode = doc.documentElement
- rootObj = DoxygenType.factory()
- rootObj.build(rootNode)
- # Enable Python to collect the space used by the DOM.
- doc = None
- sys.stdout.write('from index import *\n\n')
- sys.stdout.write('rootObj = doxygenindex(\n')
- rootObj.exportLiteral(sys.stdout, 0, name_="doxygenindex")
- sys.stdout.write(')\n')
- return rootObj
-
-
-def main():
- args = sys.argv[1:]
- if len(args) == 1:
- parse(args[0])
- else:
- usage()
-
-
-
-
-if __name__ == '__main__':
- main()
- #import pdb
- #pdb.run('main()')
diff --git a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/text.py b/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/text.py
deleted file mode 100644
index fa59668c01..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/doxyxml/text.py
+++ /dev/null
@@ -1,46 +0,0 @@
-#
-# Copyright 2010 Free Software Foundation, Inc.
-#
-# This file was generated by gr_modtool, a tool from the GNU Radio framework
-# This file is a part of gr-howto
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-"""
-Utilities for extracting text from generated classes.
-"""
-from __future__ import unicode_literals
-
-def is_string(txt):
- if isinstance(txt, str):
- return True
- try:
- if isinstance(txt, str):
- return True
- except NameError:
- pass
- return False
-
-def description(obj):
- if obj is None:
- return None
- return description_bit(obj).strip()
-
-def description_bit(obj):
- if hasattr(obj, 'content'):
- contents = [description_bit(item) for item in obj.content]
- result = ''.join(contents)
- elif hasattr(obj, 'content_'):
- contents = [description_bit(item) for item in obj.content_]
- result = ''.join(contents)
- elif hasattr(obj, 'value'):
- result = description_bit(obj.value)
- elif is_string(obj):
- return obj
- else:
- raise Exception('Expecting a string or something with content, content_ or value attribute')
- # If this bit is a paragraph then add one some line breaks.
- if hasattr(obj, 'name') and obj.name == 'para':
- result += "\n\n"
- return result
diff --git a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/other/group_defs.dox b/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/other/group_defs.dox
deleted file mode 100644
index 708f8c6d98..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/other/group_defs.dox
+++ /dev/null
@@ -1,7 +0,0 @@
-/*!
- * \defgroup block GNU Radio HOWTO C++ Signal Processing Blocks
- * \brief All C++ blocks that can be used from the HOWTO GNU Radio
- * module are listed here or in the subcategories below.
- *
- */
-
diff --git a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/other/main_page.dox b/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/other/main_page.dox
deleted file mode 100644
index 6357044912..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/other/main_page.dox
+++ /dev/null
@@ -1,10 +0,0 @@
-/*! \mainpage
-
-Welcome to the GNU Radio HOWTO Block
-
-This is the intro page for the Doxygen manual generated for the HOWTO
-block (docs/doxygen/other/main_page.dox). Edit it to add more detailed
-documentation about the new GNU Radio modules contained in this
-project.
-
-*/
diff --git a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/swig_doc.py b/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/swig_doc.py
deleted file mode 100644
index 288fed2aa7..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/docs/doxygen/swig_doc.py
+++ /dev/null
@@ -1,320 +0,0 @@
-#
-# Copyright 2010-2012 Free Software Foundation, Inc.
-#
-# This file was generated by gr_modtool, a tool from the GNU Radio framework
-# This file is a part of gr-howto
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-"""
-Creates the swig_doc.i SWIG interface file.
-Execute using: python swig_doc.py xml_path outputfilename
-
-The file instructs SWIG to transfer the doxygen comments into the
-python docstrings.
-
-"""
-from __future__ import unicode_literals
-
-import sys, time
-
-from doxyxml import DoxyIndex, DoxyClass, DoxyFriend, DoxyFunction, DoxyFile
-from doxyxml import DoxyOther, base
-
-def py_name(name):
- bits = name.split('_')
- return '_'.join(bits[1:])
-
-def make_name(name):
- bits = name.split('_')
- return bits[0] + '_make_' + '_'.join(bits[1:])
-
-
-class Block(object):
- """
- Checks if doxyxml produced objects correspond to a gnuradio block.
- """
-
- @classmethod
- def includes(cls, item):
- if not isinstance(item, DoxyClass):
- return False
- # Check for a parsing error.
- if item.error():
- return False
- friendname = make_name(item.name())
- is_a_block = item.has_member(friendname, DoxyFriend)
- # But now sometimes the make function isn't a friend so check again.
- if not is_a_block:
- is_a_block = di.has_member(friendname, DoxyFunction)
- return is_a_block
-
-class Block2(object):
- """
- Checks if doxyxml produced objects correspond to a new style
- gnuradio block.
- """
-
- @classmethod
- def includes(cls, item):
- if not isinstance(item, DoxyClass):
- return False
- # Check for a parsing error.
- if item.error():
- return False
- is_a_block2 = item.has_member('make', DoxyFunction) and item.has_member('sptr', DoxyOther)
- return is_a_block2
-
-
-def utoascii(text):
- """
- Convert unicode text into ascii and escape quotes and backslashes.
- """
- if text is None:
- return ''
- out = text.encode('ascii', 'replace')
- # swig will require us to replace blackslash with 4 backslashes
- out = out.replace(b'\\', b'\\\\\\\\')
- out = out.replace(b'"', b'\\"').decode('ascii')
- return str(out)
-
-
-def combine_descriptions(obj):
- """
- Combines the brief and detailed descriptions of an object together.
- """
- description = []
- bd = obj.brief_description.strip()
- dd = obj.detailed_description.strip()
- if bd:
- description.append(bd)
- if dd:
- description.append(dd)
- return utoascii('\n\n'.join(description)).strip()
-
-def format_params(parameteritems):
- output = ['Args:']
- template = ' {0} : {1}'
- for pi in parameteritems:
- output.append(template.format(pi.name, pi.description))
- return '\n'.join(output)
-
-entry_templ = '%feature("docstring") {name} "{docstring}"'
-def make_entry(obj, name=None, templ="{description}", description=None, params=[]):
- """
- Create a docstring entry for a swig interface file.
-
- obj - a doxyxml object from which documentation will be extracted.
- name - the name of the C object (defaults to obj.name())
- templ - an optional template for the docstring containing only one
- variable named 'description'.
- description - if this optional variable is set then it's value is
- used as the description instead of extracting it from obj.
- """
- if name is None:
- name=obj.name()
- if "operator " in name:
- return ''
- if description is None:
- description = combine_descriptions(obj)
- if params:
- description += '\n\n'
- description += utoascii(format_params(params))
- docstring = templ.format(description=description)
- if not docstring:
- return ''
- return entry_templ.format(
- name=name,
- docstring=docstring,
- )
-
-
-def make_func_entry(func, name=None, description=None, params=None):
- """
- Create a function docstring entry for a swig interface file.
-
- func - a doxyxml object from which documentation will be extracted.
- name - the name of the C object (defaults to func.name())
- description - if this optional variable is set then it's value is
- used as the description instead of extracting it from func.
- params - a parameter list that overrides using func.params.
- """
- #if params is None:
- # params = func.params
- #params = [prm.declname for prm in params]
- #if params:
- # sig = "Params: (%s)" % ", ".join(params)
- #else:
- # sig = "Params: (NONE)"
- #templ = "{description}\n\n" + sig
- #return make_entry(func, name=name, templ=utoascii(templ),
- # description=description)
- return make_entry(func, name=name, description=description, params=params)
-
-
-def make_class_entry(klass, description=None, ignored_methods=[], params=None):
- """
- Create a class docstring for a swig interface file.
- """
- if params is None:
- params = klass.params
- output = []
- output.append(make_entry(klass, description=description, params=params))
- for func in klass.in_category(DoxyFunction):
- if func.name() not in ignored_methods:
- name = klass.name() + '::' + func.name()
- output.append(make_func_entry(func, name=name))
- return "\n\n".join(output)
-
-
-def make_block_entry(di, block):
- """
- Create class and function docstrings of a gnuradio block for a
- swig interface file.
- """
- descriptions = []
- # Get the documentation associated with the class.
- class_desc = combine_descriptions(block)
- if class_desc:
- descriptions.append(class_desc)
- # Get the documentation associated with the make function
- make_func = di.get_member(make_name(block.name()), DoxyFunction)
- make_func_desc = combine_descriptions(make_func)
- if make_func_desc:
- descriptions.append(make_func_desc)
- # Get the documentation associated with the file
- try:
- block_file = di.get_member(block.name() + ".h", DoxyFile)
- file_desc = combine_descriptions(block_file)
- if file_desc:
- descriptions.append(file_desc)
- except base.Base.NoSuchMember:
- # Don't worry if we can't find a matching file.
- pass
- # And join them all together to make a super duper description.
- super_description = "\n\n".join(descriptions)
- # Associate the combined description with the class and
- # the make function.
- output = []
- output.append(make_class_entry(block, description=super_description))
- output.append(make_func_entry(make_func, description=super_description,
- params=block.params))
- return "\n\n".join(output)
-
-def make_block2_entry(di, block):
- """
- Create class and function docstrings of a new style gnuradio block for a
- swig interface file.
- """
- descriptions = []
- # For new style blocks all the relevant documentation should be
- # associated with the 'make' method.
- class_description = combine_descriptions(block)
- make_func = block.get_member('make', DoxyFunction)
- make_description = combine_descriptions(make_func)
- description = class_description + "\n\nConstructor Specific Documentation:\n\n" + make_description
- # Associate the combined description with the class and
- # the make function.
- output = []
- output.append(make_class_entry(
- block, description=description,
- ignored_methods=['make'], params=make_func.params))
- makename = block.name() + '::make'
- output.append(make_func_entry(
- make_func, name=makename, description=description,
- params=make_func.params))
- return "\n\n".join(output)
-
-def make_swig_interface_file(di, swigdocfilename, custom_output=None):
-
- output = ["""
-/*
- * This file was automatically generated using swig_doc.py.
- *
- * Any changes to it will be lost next time it is regenerated.
- */
-"""]
-
- if custom_output is not None:
- output.append(custom_output)
-
- # Create docstrings for the blocks.
- blocks = di.in_category(Block)
- blocks2 = di.in_category(Block2)
-
- make_funcs = set([])
- for block in blocks:
- try:
- make_func = di.get_member(make_name(block.name()), DoxyFunction)
- # Don't want to risk writing to output twice.
- if make_func.name() not in make_funcs:
- make_funcs.add(make_func.name())
- output.append(make_block_entry(di, block))
- except block.ParsingError:
- sys.stderr.write('Parsing error for block {0}\n'.format(block.name()))
- raise
-
- for block in blocks2:
- try:
- make_func = block.get_member('make', DoxyFunction)
- make_func_name = block.name() +'::make'
- # Don't want to risk writing to output twice.
- if make_func_name not in make_funcs:
- make_funcs.add(make_func_name)
- output.append(make_block2_entry(di, block))
- except block.ParsingError:
- sys.stderr.write('Parsing error for block {0}\n'.format(block.name()))
- raise
-
- # Create docstrings for functions
- # Don't include the make functions since they have already been dealt with.
- funcs = [f for f in di.in_category(DoxyFunction)
- if f.name() not in make_funcs and not f.name().startswith('std::')]
- for f in funcs:
- try:
- output.append(make_func_entry(f))
- except f.ParsingError:
- sys.stderr.write('Parsing error for function {0}\n'.format(f.name()))
-
- # Create docstrings for classes
- block_names = [block.name() for block in blocks]
- block_names += [block.name() for block in blocks2]
- klasses = [k for k in di.in_category(DoxyClass)
- if k.name() not in block_names and not k.name().startswith('std::')]
- for k in klasses:
- try:
- output.append(make_class_entry(k))
- except k.ParsingError:
- sys.stderr.write('Parsing error for class {0}\n'.format(k.name()))
-
- # Docstrings are not created for anything that is not a function or a class.
- # If this excludes anything important please add it here.
-
- output = "\n\n".join(output)
-
- swig_doc = open(swigdocfilename, 'w')
- swig_doc.write(output)
- swig_doc.close()
-
-if __name__ == "__main__":
- # Parse command line options and set up doxyxml.
- err_msg = "Execute using: python swig_doc.py xml_path outputfilename"
- if len(sys.argv) != 3:
- raise Exception(err_msg)
- xml_path = sys.argv[1]
- swigdocfilename = sys.argv[2]
- di = DoxyIndex(xml_path)
-
- # gnuradio.gr.msq_queue.insert_tail and delete_head create errors unless docstrings are defined!
- # This is presumably a bug in SWIG.
- #msg_q = di.get_member(u'gr_msg_queue', DoxyClass)
- #insert_tail = msg_q.get_member(u'insert_tail', DoxyFunction)
- #delete_head = msg_q.get_member(u'delete_head', DoxyFunction)
- output = []
- #output.append(make_func_entry(insert_tail, name='gr_py_msg_queue__insert_tail'))
- #output.append(make_func_entry(delete_head, name='gr_py_msg_queue__delete_head'))
- custom_output = "\n\n".join(output)
-
- # Generate the docstrings interface file.
- make_swig_interface_file(di, swigdocfilename, custom_output=custom_output)
diff --git a/gr-utils/python/modtool/templates/gr-newmod/examples/README b/gr-utils/python/modtool/templates/gr-newmod/examples/README
deleted file mode 100644
index c012bdfa0a..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/examples/README
+++ /dev/null
@@ -1,4 +0,0 @@
-It is considered good practice to add examples in here to demonstrate the
-functionality of your OOT module. Python scripts, GRC flow graphs or other
-code can go here.
-
diff --git a/gr-utils/python/modtool/templates/gr-newmod/grc/CMakeLists.txt b/gr-utils/python/modtool/templates/gr-newmod/grc/CMakeLists.txt
deleted file mode 100644
index 25ab1fd61f..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/grc/CMakeLists.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-# Copyright 2011 Free Software Foundation, Inc.
-#
-# This file was generated by gr_modtool, a tool from the GNU Radio framework
-# This file is a part of gr-howto
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-
-install(FILES
- DESTINATION share/gnuradio/grc/blocks
-)
diff --git a/gr-utils/python/modtool/templates/gr-newmod/include/howto/CMakeLists.txt b/gr-utils/python/modtool/templates/gr-newmod/include/howto/CMakeLists.txt
deleted file mode 100644
index f0aa72bb2f..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/include/howto/CMakeLists.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2011,2012 Free Software Foundation, Inc.
-#
-# This file was generated by gr_modtool, a tool from the GNU Radio framework
-# This file is a part of gr-howto
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-
-########################################################################
-# Install public header files
-########################################################################
-install(FILES
- api.h
- DESTINATION include/howto
-)
diff --git a/gr-utils/python/modtool/templates/gr-newmod/include/howto/api.h b/gr-utils/python/modtool/templates/gr-newmod/include/howto/api.h
deleted file mode 100644
index c72d17b8c8..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/include/howto/api.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright 2011 Free Software Foundation, Inc.
- *
- * This file was generated by gr_modtool, a tool from the GNU Radio framework
- * This file is a part of gr-howto
- *
- * SPDX-License-Identifier: GPL-3.0-or-later
- *
- */
-
-#ifndef INCLUDED_HOWTO_API_H
-#define INCLUDED_HOWTO_API_H
-
-#include <gnuradio/attributes.h>
-
-#ifdef gnuradio_howto_EXPORTS
-#define HOWTO_API __GR_ATTR_EXPORT
-#else
-#define HOWTO_API __GR_ATTR_IMPORT
-#endif
-
-#endif /* INCLUDED_HOWTO_API_H */
diff --git a/gr-utils/python/modtool/templates/gr-newmod/lib/CMakeLists.txt b/gr-utils/python/modtool/templates/gr-newmod/lib/CMakeLists.txt
deleted file mode 100644
index 2330b72e20..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/lib/CMakeLists.txt
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright 2011,2012,2016,2018,2019 Free Software Foundation, Inc.
-#
-# This file was generated by gr_modtool, a tool from the GNU Radio framework
-# This file is a part of gr-howto
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-
-########################################################################
-# Setup library
-########################################################################
-include(GrPlatform) #define LIB_SUFFIX
-
-list(APPEND howto_sources
-)
-
-set(howto_sources "${howto_sources}" PARENT_SCOPE)
-if(NOT howto_sources)
- MESSAGE(STATUS "No C++ sources... skipping lib/")
- return()
-endif(NOT howto_sources)
-
-add_library(gnuradio-howto SHARED ${howto_sources})
-target_link_libraries(gnuradio-howto gnuradio::gnuradio-runtime)
-target_include_directories(gnuradio-howto
- PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../include>
- PUBLIC $<INSTALL_INTERFACE:include>
- )
-set_target_properties(gnuradio-howto PROPERTIES DEFINE_SYMBOL "gnuradio_howto_EXPORTS")
-
-if(APPLE)
- set_target_properties(gnuradio-howto PROPERTIES
- INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib"
- )
-endif(APPLE)
-
-########################################################################
-# Install built library files
-########################################################################
-include(GrMiscUtils)
-GR_LIBRARY_FOO(gnuradio-howto)
-
-########################################################################
-# Print summary
-########################################################################
-message(STATUS "Using install prefix: ${CMAKE_INSTALL_PREFIX}")
-message(STATUS "Building for version: ${VERSION} / ${LIBVER}")
-
-########################################################################
-# Build and register unit test
-########################################################################
-include(GrTest)
-
-# If your unit tests require special include paths, add them here
-#include_directories()
-# List all files that contain Boost.UTF unit tests here
-list(APPEND test_howto_sources
-)
-# Anything we need to link to for the unit tests go here
-list(APPEND GR_TEST_TARGET_DEPS gnuradio-howto)
-
-if(NOT test_howto_sources)
- MESSAGE(STATUS "No C++ unit tests... skipping")
- return()
-endif(NOT test_howto_sources)
-
-foreach(qa_file ${test_howto_sources})
- GR_ADD_CPP_TEST("howto_${qa_file}"
- ${CMAKE_CURRENT_SOURCE_DIR}/${qa_file}
- )
-endforeach(qa_file)
diff --git a/gr-utils/python/modtool/templates/gr-newmod/python/CMakeLists.txt b/gr-utils/python/modtool/templates/gr-newmod/python/CMakeLists.txt
deleted file mode 100644
index a140eb618c..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/python/CMakeLists.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright 2011 Free Software Foundation, Inc.
-#
-# This file was generated by gr_modtool, a tool from the GNU Radio framework
-# This file is a part of gr-howto
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-
-########################################################################
-# Include python install macros
-########################################################################
-include(GrPython)
-if(NOT PYTHONINTERP_FOUND)
- return()
-endif()
-
-########################################################################
-# Install python sources
-########################################################################
-GR_PYTHON_INSTALL(
- FILES
- __init__.py
- DESTINATION ${GR_PYTHON_DIR}/howto
-)
-
-########################################################################
-# Handle the unit tests
-########################################################################
-include(GrTest)
-
-set(GR_TEST_TARGET_DEPS gnuradio-howto)
-set(GR_TEST_PYTHON_DIRS ${CMAKE_BINARY_DIR}/swig)
diff --git a/gr-utils/python/modtool/templates/gr-newmod/python/__init__.py b/gr-utils/python/modtool/templates/gr-newmod/python/__init__.py
deleted file mode 100644
index 2a48948088..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/python/__init__.py
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# Copyright 2008,2009 Free Software Foundation, Inc.
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-
-# The presence of this file turns this directory into a Python package
-
-'''
-This is the GNU Radio HOWTO module. Place your Python package
-description here (python/__init__.py).
-'''
-from __future__ import unicode_literals
-
-# import swig generated symbols into the howto namespace
-try:
- # this might fail if the module is python-only
- from .howto_swig import *
-except ImportError:
- pass
-
-# import any pure python here
-#
diff --git a/gr-utils/python/modtool/templates/gr-newmod/swig/CMakeLists.txt b/gr-utils/python/modtool/templates/gr-newmod/swig/CMakeLists.txt
deleted file mode 100644
index 71a19594b7..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/swig/CMakeLists.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright 2011 Free Software Foundation, Inc.
-#
-# This file was generated by gr_modtool, a tool from the GNU Radio framework
-# This file is a part of gr-howto
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-
-########################################################################
-# Check if there is C++ code at all
-########################################################################
-if(NOT howto_sources)
- MESSAGE(STATUS "No C++ sources... skipping swig/")
- return()
-endif(NOT howto_sources)
-
-########################################################################
-# Include swig generation macros
-########################################################################
-find_package(SWIG)
-find_package(PythonLibs)
-if(NOT SWIG_FOUND OR NOT PYTHONLIBS_FOUND)
- return()
-endif()
-include(GrSwig)
-include(GrPython)
-
-########################################################################
-# Setup swig generation
-########################################################################
-set(GR_SWIG_INCLUDE_DIRS $<TARGET_PROPERTY:gnuradio::runtime_swig,INTERFACE_INCLUDE_DIRECTORIES>)
-set(GR_SWIG_TARGET_DEPS gnuradio::runtime_swig)
-
-set(GR_SWIG_LIBRARIES gnuradio-howto)
-
-set(GR_SWIG_DOC_FILE ${CMAKE_CURRENT_BINARY_DIR}/howto_swig_doc.i)
-set(GR_SWIG_DOC_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/../include)
-
-GR_SWIG_MAKE(howto_swig howto_swig.i)
-
-########################################################################
-# Install the build swig module
-########################################################################
-GR_SWIG_INSTALL(TARGETS howto_swig DESTINATION ${GR_PYTHON_DIR}/howto)
-
-########################################################################
-# Install swig .i files for development
-########################################################################
-install(
- FILES
- howto_swig.i
- ${CMAKE_CURRENT_BINARY_DIR}/howto_swig_doc.i
- DESTINATION ${GR_INCLUDE_DIR}/howto/swig
-)
diff --git a/gr-utils/python/modtool/templates/gr-newmod/swig/howto_swig.i b/gr-utils/python/modtool/templates/gr-newmod/swig/howto_swig.i
deleted file mode 100644
index 2e24c12350..0000000000
--- a/gr-utils/python/modtool/templates/gr-newmod/swig/howto_swig.i
+++ /dev/null
@@ -1,12 +0,0 @@
-/* -*- c++ -*- */
-
-#define HOWTO_API
-
-%include "gnuradio.i" // the common stuff
-
-//load generated python docstrings
-%include "howto_swig_doc.i"
-
-%{
-%}
-
diff --git a/gr-utils/python/modtool/templates/templates.py b/gr-utils/python/modtool/templates/templates.py
deleted file mode 100644
index fe799cd026..0000000000
--- a/gr-utils/python/modtool/templates/templates.py
+++ /dev/null
@@ -1,754 +0,0 @@
-#
-# Copyright 2013-2014, 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-''' All the templates for skeleton files (needed by ModToolAdd) '''
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-from datetime import datetime
-
-Templates = {}
-
-# Default licence
-Templates['defaultlicense'] = '''
-Copyright %d {copyrightholder}.
-
-SPDX-License-Identifier: GPL-3.0-or-later
-''' % datetime.now().year
-
-Templates['grlicense'] = '''
-Copyright {0} Free Software Foundation, Inc.
-
-This file is part of GNU Radio
-
-SPDX-License-Identifier: GPL-3.0-or-later
-'''.format(datetime.now().year)
-
-# Header file of a sync/decimator/interpolator block
-Templates['block_impl_h'] = '''/* -*- c++ -*- */
-${str_to_fancyc_comment(license)}
-#ifndef INCLUDED_${modname.upper()}_${blockname.upper()}_IMPL_H
-#define INCLUDED_${modname.upper()}_${blockname.upper()}_IMPL_H
-
-#include <${include_dir_prefix}/${blockname}.h>
-
-namespace gr {
- namespace ${modname} {
-
- class ${blockname}_impl : public ${blockname}
- {
- private:
- // Nothing to declare in this block.
-
-% if blocktype == 'tagged_stream':
- protected:
- int calculate_output_stream_length(const gr_vector_int &ninput_items);
-
-% endif
- public:
- ${blockname}_impl(${strip_default_values(arglist)});
- ~${blockname}_impl();
-
- // Where all the action really happens
-% if blocktype == 'general':
- void forecast (int noutput_items, gr_vector_int &ninput_items_required);
-
- int general_work(int noutput_items,
- gr_vector_int &ninput_items,
- gr_vector_const_void_star &input_items,
- gr_vector_void_star &output_items);
-
-% elif blocktype == 'tagged_stream':
- int work(
- int noutput_items,
- gr_vector_int &ninput_items,
- gr_vector_const_void_star &input_items,
- gr_vector_void_star &output_items
- );
-% elif blocktype == 'hier':
-% else:
- int work(
- int noutput_items,
- gr_vector_const_void_star &input_items,
- gr_vector_void_star &output_items
- );
-% endif
- };
-
- } // namespace ${modname}
-} // namespace gr
-
-#endif /* INCLUDED_${modname.upper()}_${blockname.upper()}_IMPL_H */
-
-'''
-
-# C++ file of a GR block
-Templates['block_impl_cpp'] = '''/* -*- c++ -*- */
-${str_to_fancyc_comment(license)}
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
-#include <gnuradio/io_signature.h>
-% if blocktype == 'noblock':
-#include <${include_dir_prefix}/${blockname}.h>
-% else:
-#include "${blockname}_impl.h"
-% endif
-
-namespace gr {
- namespace ${modname} {
-
-% if blocktype == 'noblock':
- ${blockname}::${blockname}(${strip_default_values(arglist)})
- {
- }
-
- ${blockname}::~${blockname}()
- {
- }
-% else:
- ${blockname}::sptr
- ${blockname}::make(${strip_default_values(arglist)})
- {
- return gnuradio::get_initial_sptr
- (new ${blockname}_impl(${strip_arg_types(arglist)}));
- }
-
-<%
- if blocktype == 'decimator':
- decimation = ', <+decimation+>'
- elif blocktype == 'interpolator':
- decimation = ', <+interpolation+>'
- elif blocktype == 'tagged_stream':
- decimation = ', <+len_tag_key+>'
- else:
- decimation = ''
- endif
- if blocktype == 'source':
- inputsig = '0, 0, 0'
- else:
- inputsig = '<+MIN_IN+>, <+MAX_IN+>, sizeof(<+ITYPE+>)'
- endif
- if blocktype == 'sink':
- outputsig = '0, 0, 0'
- else:
- outputsig = '<+MIN_OUT+>, <+MAX_OUT+>, sizeof(<+OTYPE+>)'
- endif
-%>
- /*
- * The private constructor
- */
- ${blockname}_impl::${blockname}_impl(${strip_default_values(arglist)})
- : gr::${grblocktype}("${blockname}",
- gr::io_signature::make(${inputsig}),
- gr::io_signature::make(${outputsig})${decimation})
- % if blocktype == 'hier':
- {
- connect(self(), 0, d_firstblock, 0);
- // connect other blocks
- connect(d_lastblock, 0, self(), 0);
- }
- % else:
- {}
- % endif
-
- /*
- * Our virtual destructor.
- */
- ${blockname}_impl::~${blockname}_impl()
- {
- }
-
- % if blocktype == 'general':
- void
- ${blockname}_impl::forecast (int noutput_items, gr_vector_int &ninput_items_required)
- {
- /* <+forecast+> e.g. ninput_items_required[0] = noutput_items */
- }
-
- int
- ${blockname}_impl::general_work (int noutput_items,
- gr_vector_int &ninput_items,
- gr_vector_const_void_star &input_items,
- gr_vector_void_star &output_items)
- {
- const <+ITYPE+> *in = (const <+ITYPE+> *) input_items[0];
- <+OTYPE+> *out = (<+OTYPE+> *) output_items[0];
-
- // Do <+signal processing+>
- // Tell runtime system how many input items we consumed on
- // each input stream.
- consume_each (noutput_items);
-
- // Tell runtime system how many output items we produced.
- return noutput_items;
- }
- % elif blocktype == 'tagged_stream':
- int
- ${blockname}_impl::calculate_output_stream_length(const gr_vector_int &ninput_items)
- {
- int noutput_items = /* <+set this+> */;
- return noutput_items ;
- }
-
- int
- ${blockname}_impl::work (int noutput_items,
- gr_vector_int &ninput_items,
- gr_vector_const_void_star &input_items,
- gr_vector_void_star &output_items)
- {
- const <+ITYPE+> *in = (const <+ITYPE+> *) input_items[0];
- <+OTYPE+> *out = (<+OTYPE+> *) output_items[0];
-
- // Do <+signal processing+>
-
- // Tell runtime system how many output items we produced.
- return noutput_items;
- }
- % elif blocktype == 'hier':
- % else:
- int
- ${blockname}_impl::work(int noutput_items,
- gr_vector_const_void_star &input_items,
- gr_vector_void_star &output_items)
- {
- % if blocktype != 'source':
- const <+ITYPE+> *in = (const <+ITYPE+> *) input_items[0];
- % endif
- % if blocktype != 'sink':
- <+OTYPE+> *out = (<+OTYPE+> *) output_items[0];
- % endif
-
- // Do <+signal processing+>
-
- // Tell runtime system how many output items we produced.
- return noutput_items;
- }
- % endif
-% endif
-
- } /* namespace ${modname} */
-} /* namespace gr */
-
-'''
-
-# Block definition header file (for include/)
-Templates['block_def_h'] = r'''/* -*- c++ -*- */
-${str_to_fancyc_comment(license)}
-#ifndef INCLUDED_${modname.upper()}_${blockname.upper()}_H
-#define INCLUDED_${modname.upper()}_${blockname.upper()}_H
-
-#include <${include_dir_prefix}/api.h>
-% if blocktype != 'noblock':
-#include <gnuradio/${grblocktype}.h>
-% endif
-
-namespace gr {
- namespace ${modname} {
-
-% if blocktype == 'noblock':
- /*!
- * \brief <+description+>
- *
- */
- class ${modname.upper()}_API ${blockname}
- {
- public:
- ${blockname}(${arglist});
- ~${blockname}();
- private:
- };
-% else:
- /*!
- * \brief <+description of block+>
- * \ingroup ${modname}
- *
- */
- class ${modname.upper()}_API ${blockname} : virtual public gr::${grblocktype}
- {
- public:
- typedef std::shared_ptr<${blockname}> sptr;
-
- /*!
- * \brief Return a shared_ptr to a new instance of ${modname}::${blockname}.
- *
- * To avoid accidental use of raw pointers, ${modname}::${blockname}'s
- * constructor is in a private implementation
- * class. ${modname}::${blockname}::make is the public interface for
- * creating new instances.
- */
- static sptr make(${arglist});
- };
-% endif
-
- } // namespace ${modname}
-} // namespace gr
-
-#endif /* INCLUDED_${modname.upper()}_${blockname.upper()}_H */
-
-'''
-
-# Python block
-Templates['block_python'] = '''#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-${str_to_python_comment(license)}
-<%
- if blocktype == 'noblock':
- return
- if blocktype in ('sync', 'sink', 'source'):
- parenttype = 'gr.sync_block'
- else:
- parenttype = {
- 'hier': 'gr.hier_block2',
- 'interpolator': 'gr.interp_block',
- 'decimator': 'gr.decim_block',
- 'general': 'gr.basic_block'
- }[blocktype]
-%>
-% if blocktype != 'hier':
-import numpy\
-<%
- if blocktype == 'source':
- inputsig = 'None'
- else:
- inputsig = '[<+numpy.float32+>, ]'
- if blocktype == 'sink':
- outputsig = 'None'
- else:
- outputsig = '[<+numpy.float32+>, ]'
-%>
-% else:
-<%
- if blocktype == 'source':
- inputsig = '0, 0, 0'
- else:
- inputsig = '<+MIN_IN+>, <+MAX_IN+>, gr.sizeof_<+ITYPE+>'
- if blocktype == 'sink':
- outputsig = '0, 0, 0'
- else:
- outputsig = '<+MIN_OUT+>, <+MAX_OUT+>, gr.sizeof_<+OTYPE+>'
-%>
-% endif
-<%
- if blocktype == 'interpolator':
- deciminterp = ', <+interpolation+>'
- elif blocktype == 'decimator':
- deciminterp = ', <+decimation+>'
- else:
- deciminterp = ''
- if arglist == '':
- arglistsep = ''
- else:
- arglistsep = ', '
-%>from gnuradio import gr
-
-class ${blockname}(${parenttype}):
- """
- docstring for block ${blockname}
- """
- def __init__(self${arglistsep}${arglist}):
- ${parenttype}.__init__(self,
-% if blocktype == 'hier':
- "${blockname}",
- gr.io_signature(${inputsig}), # Input signature
- gr.io_signature(${outputsig})) # Output signature
-
- # Define blocks and connect them
- self.connect()
-<% return %>
-% else:
- name="${blockname}",
- in_sig=${inputsig},
- out_sig=${outputsig}${deciminterp})
-% endif
-
-% if blocktype == 'general':
- def forecast(self, noutput_items, ninput_items_required):
- #setup size of input_items[i] for work call
- for i in range(len(ninput_items_required)):
- ninput_items_required[i] = noutput_items
-
- def general_work(self, input_items, output_items):
- output_items[0][:] = input_items[0]
- consume(0, len(input_items[0]))\
- #self.consume_each(len(input_items[0]))
- return len(output_items[0])
-<% return %>
-% endif
-
- def work(self, input_items, output_items):
-% if blocktype != 'source':
- in0 = input_items[0]
-% endif
-% if blocktype != 'sink':
- out = output_items[0]
-% endif
- # <+signal processing here+>
-% if blocktype in ('sync', 'decimator', 'interpolator'):
- out[:] = in0
- return len(output_items[0])
-% elif blocktype == 'sink':
- return len(input_items[0])
-% elif blocktype == 'source':
- out[:] = whatever
- return len(output_items[0])
-% endif
-
-'''
-
-# C++ file for QA (Boost UTF style)
-Templates['qa_cpp_boostutf'] = '''/* -*- c++ -*- */
-${str_to_fancyc_comment(license)}
-#ifdef HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#include <${include_dir_prefix}/${blockname}.h>
-#include <gnuradio/attributes.h>
-#include <boost/test/unit_test.hpp>
-
-namespace gr {
- namespace ${modname} {
-
- BOOST_AUTO_TEST_CASE(test_${blockname}_t1)
- {
- // Put test here
- }
-
- } /* namespace ${modname} */
-} /* namespace gr */
-'''
-
-# C++ file for QA
-Templates['qa_cpp'] = '''/* -*- c++ -*- */
-${str_to_fancyc_comment(license)}
-#include <gnuradio/attributes.h>
-#include <cppunit/TestAssert.h>
-#include "qa_${blockname}.h"
-#include <${include_dir_prefix}/${blockname}.h>
-
-namespace gr {
- namespace ${modname} {
-
- void
- qa_${blockname}::t1()
- {
- // Put test here
- }
-
- } /* namespace ${modname} */
-} /* namespace gr */
-
-'''
-
-# Header file for QA
-Templates['qa_h'] = '''/* -*- c++ -*- */
-${str_to_fancyc_comment(license)}
-#ifndef _QA_${blockname.upper()}_H_
-#define _QA_${blockname.upper()}_H_
-
-#include <cppunit/extensions/HelperMacros.h>
-#include <cppunit/TestCase.h>
-
-namespace gr {
- namespace ${modname} {
-
- class qa_${blockname} : public CppUnit::TestCase
- {
- public:
- CPPUNIT_TEST_SUITE(qa_${blockname});
- CPPUNIT_TEST(t1);
- CPPUNIT_TEST_SUITE_END();
-
- private:
- void t1();
- };
-
- } /* namespace ${modname} */
-} /* namespace gr */
-
-#endif /* _QA_${blockname.upper()}_H_ */
-
-'''
-
-# Python QA code
-Templates['qa_python'] = '''#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-${str_to_python_comment(license)}
-from gnuradio import gr, gr_unittest
-from gnuradio import blocks
-% if lang == 'cpp':
-import ${modname}_swig as ${modname}
-% else:
-from ${blockname} import ${blockname}
-% endif
-
-class qa_${blockname}(gr_unittest.TestCase):
-
- def setUp(self):
- self.tb = gr.top_block()
-
- def tearDown(self):
- self.tb = None
-
- def test_001_t(self):
- # set up fg
- self.tb.run()
- # check data
-
-
-if __name__ == '__main__':
- gr_unittest.run(qa_${blockname})
-'''
-
-Templates['grc_yml'] = '''id: ${modname}_${blockname}
-label: ${blockname}
-category: '[${modname}]'
-
-templates:
- imports: import ${modname}
- make: ${modname}.${blockname}(${strip_arg_types_grc(arglist)})
-
-# Make one 'parameters' list entry for every parameter you want settable from the GUI.
-# Keys include:
-# * id (makes the value accessible as keyname, e.g. in the make entry)
-# * label (label shown in the GUI)
-# * dtype (e.g. int, float, complex, byte, short, xxx_vector, ...)
-parameters:
-- id: ...
- label: ...
- dtype: ...
-- id: ...
- label: ...
- dtype: ...
-
-# Make one 'inputs' list entry per input and one 'outputs' list entry per output.
-# Keys include:
-# * label (an identifier for the GUI)
-# * domain (optional - stream or message. Default is stream)
-# * dtype (e.g. int, float, complex, byte, short, xxx_vector, ...)
-# * vlen (optional - data stream vector length. Default is 1)
-# * optional (optional - set to 1 for optional inputs. Default is 0)
-inputs:
-- label: ...
- domain: ...
- dtype: ...
- vlen: ...
- optional: ...
-
-outputs:
-- label: ...
- domain: ...
- dtype: ...
- vlen: ...
- optional: ...
-
-# 'file_format' specifies the version of the GRC yml format used in the file
-# and should usually not be changed.
-file_format: 1
-'''
-
-# SWIG string
-Templates['swig_block_magic'] = """% if version == '36':
-% if blocktype != 'noblock':
-GR_SWIG_BLOCK_MAGIC(${modname}, ${blockname});
-% endif
-%%include "${modname}_${blockname}.h"
-% else:
-%%include "${include_dir_prefix}/${blockname}.h"
- % if blocktype != 'noblock':
-GR_SWIG_BLOCK_MAGIC2(${modname}, ${blockname});
- % endif
-% endif
-"""
-
-## Old stuff
-# C++ file of a GR block
-Templates['block_cpp36'] = '''/* -*- c++ -*- */
-${str_to_fancyc_comment(license)}
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
-% if blocktype != 'noblock':
-#include <gr_io_signature.h>
-% endif
-#include "${modname}_${blockname}.h"
-
-% if blocktype == 'noblock':
-${modname}_${blockname}::${modname}_${blockname}(${strip_default_values(arglist)})
-{
-}
-
-${modname}_${blockname}::~${modname}_${blockname}()
-{
-}
-% else:
-${modname}_${blockname}_sptr
-${modname}_make_${blockname} (${strip_default_values(arglist)})
-{
- return gnuradio::get_initial_sptr (new ${modname}_${blockname}(${strip_arg_types(arglist)}));
-}
-
-<%
- if blocktype == 'interpolator':
- deciminterp = ', <+interpolation+>'
- elif blocktype == 'decimator':
- deciminterp = ', <+decimation+>'
- else:
- deciminterp = ''
- if arglist == '':
- arglistsep = ''
- else:
- arglistsep = ', '
- if blocktype == 'source':
- inputsig = '0, 0, 0'
- else:
- inputsig = '<+MIN_IN+>, <+MAX_IN+>, sizeof(<+ITYPE+>)'
- if blocktype == 'sink':
- outputsig = '0, 0, 0'
- else:
- outputsig = '<+MIN_OUT+>, <+MAX_OUT+>, sizeof(<+OTYPE+>)'
-%>
-
-/*
- * The private constructor
- */
-${modname}_${blockname}::${modname}_${blockname} (${strip_default_values(arglist)})
- : gr_${grblocktype} ("${blockname}",
- gr_make_io_signature(${inputsig}),
- gr_make_io_signature(${outputsig})${deciminterp})
-{
-% if blocktype == 'hier':
- connect(self(), 0, d_firstblock, 0);
- // <+connect other blocks+>
- connect(d_lastblock, 0, self(), 0);
-% else:
- // Put in <+constructor stuff+> here
-% endif
-}
-
-
-/*
- * Our virtual destructor.
- */
-${modname}_${blockname}::~${modname}_${blockname}()
-{
- // Put in <+destructor stuff+> here
-}
-% endif
-
-
-% if blocktype == 'general':
-void
-${modname}_${blockname}::forecast (int noutput_items, gr_vector_int &ninput_items_required)
-{
- /* <+forecast+> e.g. ninput_items_required[0] = noutput_items */
-}
-
-int
-${modname}_${blockname}::general_work (int noutput_items,
- gr_vector_int &ninput_items,
- gr_vector_const_void_star &input_items,
- gr_vector_void_star &output_items)
-{
- const <+ITYPE+> *in = (const <+ITYPE+> *) input_items[0];
- <+OTYPE+> *out = (<+OTYPE+> *) output_items[0];
-
- // Do <+signal processing+>
- // Tell runtime system how many input items we consumed on
- // each input stream.
- consume_each (noutput_items);
-
- // Tell runtime system how many output items we produced.
- return noutput_items;
-}
-% elif blocktype == 'hier' or blocktype == 'noblock':
-% else:
-int
-${modname}_${blockname}::work(int noutput_items,
- gr_vector_const_void_star &input_items,
- gr_vector_void_star &output_items)
-{
- const <+ITYPE+> *in = (const <+ITYPE+> *) input_items[0];
- <+OTYPE+> *out = (<+OTYPE+> *) output_items[0];
-
- // Do <+signal processing+>
-
- // Tell runtime system how many output items we produced.
- return noutput_items;
-}
-% endif
-
-'''
-
-# Block definition header file (for include/)
-Templates['block_h36'] = r'''/* -*- c++ -*- */
-${str_to_fancyc_comment(license)}
-#ifndef INCLUDED_${modname.upper()}_${blockname.upper()}_H
-#define INCLUDED_${modname.upper()}_${blockname.upper()}_H
-
-#include <${modname}_api.h>
-% if blocktype == 'noblock':
-class ${modname.upper()}_API ${blockname}
-{
- ${blockname}(${arglist});
- ~${blockname}();
- private:
-};
-
-% else:
-#include <gr_${grblocktype}.h>
-
-class ${modname}_${blockname};
-
-typedef std::shared_ptr<${modname}_${blockname}> ${modname}_${blockname}_sptr;
-
-${modname.upper()}_API ${modname}_${blockname}_sptr ${modname}_make_${blockname} (${arglist});
-
-/*!
- * \brief <+description+>
- * \ingroup ${modname}
- *
- */
-class ${modname.upper()}_API ${modname}_${blockname} : public gr_${grblocktype}
-{
- private:
- friend ${modname.upper()}_API ${modname}_${blockname}_sptr ${modname}_make_${blockname} (${strip_default_values(arglist)});
-
- ${modname}_${blockname}(${strip_default_values(arglist)});
-
- public:
- ~${modname}_${blockname}();
-
- % if blocktype == 'general':
- void forecast (int noutput_items, gr_vector_int &ninput_items_required);
-
- // Where all the action really happens
- int general_work (int noutput_items,
- gr_vector_int &ninput_items,
- gr_vector_const_void_star &input_items,
- gr_vector_void_star &output_items);
- % elif blocktype == 'hier':
- % else:
- // Where all the action really happens
- int work (int noutput_items,
- gr_vector_const_void_star &input_items,
- gr_vector_void_star &output_items);
- % endif
-};
-% endif
-
-#endif /* INCLUDED_${modname.upper()}_${blockname.upper()}_H */
-
-'''
diff --git a/gr-utils/python/modtool/tests/CMakeLists.txt b/gr-utils/python/modtool/tests/CMakeLists.txt
deleted file mode 100644
index 7c0473bcd9..0000000000
--- a/gr-utils/python/modtool/tests/CMakeLists.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 2019 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-
-include(GrPython)
-
-########################################################################
-# Handle the unit tests
-########################################################################
-if(ENABLE_TESTING)
-
- set(GR_TEST_TARGET_DEPS "")
- set(GR_TEST_LIBRARY_DIRS "")
- set(GR_TEST_PYTHON_DIRS
- ${CMAKE_CURRENT_SOURCE_DIR}/../..
- ${CMAKE_BINARY_DIR}/gnuradio-runtime/python
- ${CMAKE_BINARY_DIR}/gnuradio-runtime/swig
- )
-
- include(GrTest)
- file(GLOB py_qa_test_files "test_*.py")
-
- foreach(py_qa_test_file ${py_qa_test_files})
- get_filename_component(py_qa_test_name ${py_qa_test_file} NAME_WE)
- GR_ADD_TEST(${py_qa_test_name} ${QA_PYTHON_EXECUTABLE} -B ${py_qa_test_file})
- endforeach(py_qa_test_file)
-
-endif(ENABLE_TESTING)
diff --git a/gr-utils/python/modtool/tests/__init__.py b/gr-utils/python/modtool/tests/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/gr-utils/python/modtool/tests/__init__.py
+++ /dev/null
diff --git a/gr-utils/python/modtool/tests/test_modtool.py b/gr-utils/python/modtool/tests/test_modtool.py
deleted file mode 100644
index 89b665d0bd..0000000000
--- a/gr-utils/python/modtool/tests/test_modtool.py
+++ /dev/null
@@ -1,323 +0,0 @@
-#
-# Copyright 2018, 2019 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" The file for testing the gr-modtool scripts """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import shutil
-import tempfile
-import unittest
-import warnings
-from os import path
-try:
- from pylint.epylint import py_run
- skip_pylint_test = False
-except ImportError:
- skip_pylint_test = True
-
-from modtool.core import ModToolNewModule
-from modtool.core import ModToolAdd
-from modtool.core import ModToolDisable
-from modtool.core import ModToolException
-from modtool.core import ModToolMakeYAML
-from modtool.core import ModToolRename
-from modtool.core import ModToolRemove
-
-class TestModToolCore(unittest.TestCase):
- """ The tests for the modtool core """
- def __init__(self, *args, **kwargs):
- super(TestModToolCore, self).__init__(*args, **kwargs)
- self.f_add = False
- self.f_newmod = False
- self.srcdir = path.abspath(path.join(path.dirname(path.realpath(__file__)), '../templates/gr-newmod'))
-
- @classmethod
- def setUpClass(cls):
- """ create a temporary directory """
- cls.test_dir = tempfile.mkdtemp()
-
- @classmethod
- def tearDownClass(cls):
- """ remove the directory after the test """
- shutil.rmtree(cls.test_dir)
-
- def setUp(self):
- """ create a new module and block before every test """
- try:
- warnings.simplefilter("ignore", ResourceWarning)
- args = {'module_name':'howto',
- 'directory': self.test_dir,
- 'srcdir': self.srcdir}
- ModToolNewModule(**args).run()
- except (TypeError, ModToolException):
- self.f_newmod = True
- else:
- try:
- args = {'blockname':'square_ff', 'block_type':'general',
- 'lang':'cpp', 'directory': self.test_dir + '/gr-howto',
- 'add_python_qa': True}
- ModToolAdd(**args).run()
- except (TypeError, ModToolException):
- self.f_add = True
-
- def tearDown(self):
- """ removes the created module """
- # Required, else the new-module directory command
- # in setup will throw exception after first test
- ## cannot remove if directory is not created
- if not self.f_newmod:
- rmdir = self.test_dir + '/gr-howto'
- shutil.rmtree(rmdir)
-
- def test_newmod(self):
- """ Tests for the API function newmod """
- ## Tests for proper exceptions ##
- test_dict = { 'directory': self.test_dir,
- 'srcdir': self.srcdir}
- # module name not specified
- self.assertRaises(ModToolException, ModToolNewModule(**test_dict).run)
- test_dict['module_name'] = 'howto'
- # expected module_name as a string instead of dict
- self.assertRaises(TypeError, ModToolNewModule(test_dict).run)
- # directory already exists
- # will not be raised if the command in setup failed
- self.assertRaises(ModToolException, ModToolNewModule(**test_dict).run)
-
- ## Some tests for checking the created directory, sub-directories and files ##
- test_dict['module_name'] = 'test'
- ModToolNewModule(**test_dict).run()
- module_dir = path.join(self.test_dir, 'gr-test')
- self.assertTrue(path.isdir(module_dir))
- self.assertTrue(path.isdir(path.join(module_dir, 'lib')))
- self.assertTrue(path.isdir(path.join(module_dir, 'python')))
- self.assertTrue(path.isdir(path.join(module_dir, 'include')))
- self.assertTrue(path.isdir(path.join(module_dir, 'docs')))
- self.assertTrue(path.isdir(path.join(module_dir, 'cmake')))
- self.assertTrue(path.isdir(path.join(module_dir, 'swig')))
- self.assertTrue(path.exists(path.join(module_dir, 'CMakeLists.txt')))
-
- ## The check for object instantiation ##
- test_obj = ModToolNewModule(srcdir = self.srcdir)
- # module name not specified
- with self.assertRaises(ModToolException) as context_manager:
- test_obj.run()
- test_obj.info['modname'] = 'howto'
- test_obj.directory = self.test_dir
- # directory already exists
- self.assertRaises(ModToolException, test_obj.run)
- test_obj.info['modname'] = 'test1'
- test_obj.run()
- self.assertTrue(path.isdir(self.test_dir+'/gr-test1'))
- self.assertTrue(path.isdir(self.test_dir+'/gr-test1/lib'))
- self.assertTrue(path.exists(self.test_dir+'/gr-test1/CMakeLists.txt'))
-
- @unittest.skipIf(skip_pylint_test, 'pylint dependency missing, skip test')
- def test_pylint_newmod(self):
- """ Pylint tests for API function newmod """
- module_dir = path.join(self.test_dir, 'gr-test')
- ## pylint tests ##
- python_dir = path.join(module_dir, 'python')
- py_module = path.join(python_dir, 'mul_ff.py')
- (pylint_stdout, pylint_stderr) = py_run(py_module+' --errors-only --disable=E0602', return_std=True)
- print(pylint_stdout.getvalue(), end='')
- py_module = path.join(python_dir, 'qa_mul_ff.py')
- (pylint_stdout, pylint_stderr) = py_run(py_module+' --errors-only', return_std=True)
- print(pylint_stdout.getvalue(), end='')
-
- def test_add(self):
- """ Tests for the API function add """
- ## skip tests if newmod command wasn't successful
- if self.f_newmod:
- raise unittest.SkipTest("setUp for API function 'add' failed")
- module_dir = path.join(self.test_dir, 'gr-howto')
- ## Tests for proper exceptions ##
- test_dict = {}
- test_dict['directory'] = module_dir
- # missing blockname, block_type, lang
- self.assertRaises(ModToolException, ModToolAdd(**test_dict).run)
- test_dict['blockname'] = 'add_ff'
- # missing arguments block_type, lang
- self.assertRaises(ModToolException, ModToolAdd(**test_dict).run)
- test_dict['block_type'] = 'general'
- # missing argument lang
- self.assertRaises(ModToolException, ModToolAdd(**test_dict).run)
- test_dict['lang'] = 'cxx'
- # incorrect language
- self.assertRaises(ModToolException, ModToolAdd(**test_dict).run)
- test_dict['lang'] = 'cpp'
- test_dict['add_cpp_qa'] = 'Wrong'
- # boolean is expected for add_cpp_qa
- self.assertRaises(ModToolException, ModToolAdd(**test_dict).run)
- test_dict['add_cpp_qa'] = True
- test_dict['block_type'] = 'generaleee'
- # incorrect block type
- self.assertRaises(ModToolException, ModToolAdd(**test_dict).run)
- test_dict['block_type'] = 'general'
- test_dict['skip_lib'] = 'fail'
- # boolean value is expected for skip_lib, fails in instantiation
- self.assertRaises(ModToolException, ModToolAdd(**test_dict).run)
- test_dict['skip_lib'] = True
- # missing relevant subdir
- self.assertRaises(ModToolException, ModToolAdd(**test_dict).run)
-
- ## Some tests for checking the created directory, sub-directories and files ##
- test_dict['skip_lib'] = False
- ModToolAdd(**test_dict).run()
- self.assertTrue(path.exists(path.join(module_dir, 'lib', 'qa_add_ff.cc')))
- self.assertTrue(path.exists(path.join(module_dir, 'lib', 'add_ff_impl.cc')))
- self.assertTrue(path.exists(path.join(module_dir, 'grc', 'howto_add_ff.block.yml')))
- self.assertTrue(path.exists(path.join(module_dir, 'include', 'howto', 'add_ff.h')))
-
- ## The check for object instantiation ##
- test_obj = ModToolAdd()
- test_obj.dir = module_dir
- # missing blocktype, lang, blockname
- self.assertRaises(ModToolException, test_obj.run)
- test_obj.info['blocktype'] = 'general'
- # missing lang, blockname
- self.assertRaises(ModToolException, test_obj.run)
- test_obj.info['lang'] = 'python'
- test_obj.info['blockname'] = 'mul_ff'
- test_obj.add_py_qa = True
- test_obj.run()
- self.assertTrue(path.exists(path.join(module_dir, 'python', 'mul_ff.py')))
- self.assertTrue(path.exists(path.join(module_dir, 'python', 'qa_mul_ff.py')))
- self.assertTrue(path.exists(path.join(module_dir, 'grc', 'howto_mul_ff.block.yml')))
-
- @unittest.skipIf(skip_pylint_test, 'pylint dependency missing, skip test')
- def test_pylint_add(self):
- """ Pylint tests for API function add """
- ## skip tests if newmod command wasn't successful
- if self.f_newmod:
- raise unittest.SkipTest("setUp for API function 'add' failed")
- module_dir = path.join(self.test_dir, 'gr-howto')
-
- ## The check for object instantiation ##
- test_obj = ModToolAdd()
- test_obj.dir = module_dir
- # missing blocktype, lang, blockname
- self.assertRaises(ModToolException, test_obj.run)
- test_obj.info['blocktype'] = 'general'
- # missing lang, blockname
- self.assertRaises(ModToolException, test_obj.run)
- test_obj.info['lang'] = 'python'
- test_obj.info['blockname'] = 'mul_ff'
- test_obj.add_py_qa = True
- test_obj.run()
- self.assertTrue(path.exists(path.join(module_dir, 'python', 'mul_ff.py')))
- self.assertTrue(path.exists(path.join(module_dir, 'python', 'qa_mul_ff.py')))
-
- ## pylint tests ##
- python_dir = path.join(module_dir, 'python')
- py_module = path.join(python_dir, 'mul_ff.py')
- (pylint_stdout, pylint_stderr) = py_run(py_module+' --errors-only --disable=E0602', return_std=True)
- print(pylint_stdout.getvalue(), end='')
- py_module = path.join(python_dir, 'qa_mul_ff.py')
- (pylint_stdout, pylint_stderr) = py_run(py_module+' --errors-only', return_std=True)
- print(pylint_stdout.getvalue(), end='')
-
- def test_rename(self):
- """ Tests for the API function rename """
- if self.f_newmod or self.f_add:
- raise unittest.SkipTest("setUp for API function 'rename' failed")
-
- module_dir = path.join(self.test_dir, 'gr-howto')
- test_dict = {}
- test_dict['directory'] = module_dir
- # Missing 2 arguments blockname, new_name
- self.assertRaises(ModToolException, ModToolRename(**test_dict).run)
- test_dict['blockname'] = 'square_ff'
- # Missing argument new_name
- self.assertRaises(ModToolException, ModToolRename(**test_dict).run)
- test_dict['new_name'] = '//#'
- # Invalid new block name!
- self.assertRaises(ModToolException, ModToolRename(**test_dict).run)
- test_dict['new_name'] = None
- # New Block name not specified
- self.assertRaises(ModToolException, ModToolRename(**test_dict).run)
-
- ## Some tests for checking the renamed files ##
- test_dict['new_name'] = 'div_ff'
- ModToolRename(**test_dict).run()
- self.assertTrue(path.exists(path.join(module_dir, 'lib', 'div_ff_impl.h')))
- self.assertTrue(path.exists(path.join(module_dir, 'lib', 'div_ff_impl.cc')))
- self.assertTrue(path.exists(path.join(module_dir, 'python', 'qa_div_ff.py')))
- self.assertTrue(path.exists(path.join(module_dir, 'grc', 'howto_div_ff.block.yml')))
-
- ## The check for object instantiation ##
- test_obj = ModToolRename()
- test_obj.info['oldname'] = 'div_ff'
- test_obj.info['newname'] = 'sub_ff'
- test_obj.run()
- self.assertTrue(path.exists(path.join(module_dir, 'lib', 'sub_ff_impl.h')))
- self.assertTrue(path.exists(path.join(module_dir, 'lib', 'sub_ff_impl.cc')))
- self.assertTrue(path.exists(path.join(module_dir, 'python', 'qa_sub_ff.py')))
- self.assertTrue(path.exists(path.join(module_dir, 'grc', 'howto_sub_ff.block.yml')))
-
- def test_remove(self):
- """ Tests for the API function remove """
- if self.f_newmod or self.f_add:
- raise unittest.SkipTest("setUp for API function 'remove' failed")
- module_dir = path.join(self.test_dir, 'gr-howto')
- test_dict = {}
- # missing argument blockname
-        self.assertRaises(ModToolException, ModToolRemove(**test_dict).run)
-        test_dict['directory'] = module_dir
-        self.assertRaises(ModToolException, ModToolRemove(**test_dict).run)
-
- ## Some tests to check blocks are not removed with different blocknames ##
- test_dict['blockname'] = 'div_ff'
- ModToolRemove(**test_dict).run()
- self.assertTrue(path.exists(path.join(module_dir, 'lib', 'square_ff_impl.h')))
- self.assertTrue(path.exists(path.join(module_dir, 'lib', 'square_ff_impl.cc')))
- self.assertTrue(path.exists(path.join(module_dir, 'python', 'qa_square_ff.py')))
- self.assertTrue(path.exists(path.join(module_dir, 'grc', 'howto_square_ff.block.yml')))
-
- ## Some tests for checking the non-existence of removed files ##
- test_dict['blockname'] = 'square_ff'
- ModToolRemove(**test_dict).run()
- self.assertTrue(not path.exists(path.join(module_dir, 'lib', 'square_ff_impl.h')))
- self.assertTrue(not path.exists(path.join(module_dir, 'lib', 'square_ff_impl.cc')))
- self.assertTrue(not path.exists(path.join(module_dir, 'python', 'qa_square_ff.py')))
- self.assertTrue(not path.exists(path.join(module_dir, 'grc', 'howto_square_ff.block.yml')))
-
- def test_makeyaml(self):
- """ Tests for the API function makeyaml """
- if self.f_newmod or self.f_add:
- raise unittest.SkipTest("setUp for API function 'makeyaml' failed")
- module_dir = path.join(self.test_dir, 'gr-howto')
- test_dict = {}
- # missing argument blockname
- self.assertRaises(ModToolException, ModToolMakeYAML(**test_dict).run)
- test_dict['directory'] = module_dir
- self.assertRaises(ModToolException, ModToolMakeYAML(**test_dict).run)
-
-        ## Some tests to check if the command runs ##
- test_dict['blockname'] = 'square_ff'
- ModToolMakeYAML(**test_dict).run()
-
- def test_disable(self):
- """ Tests for the API function disable """
- if self.f_newmod or self.f_add:
- raise unittest.SkipTest("setUp for API function 'disable' failed")
- module_dir = path.join(self.test_dir, 'gr-howto')
- test_dict = {}
- # missing argument blockname
- self.assertRaises(ModToolException, ModToolDisable(**test_dict).run)
- test_dict['directory'] = module_dir
- self.assertRaises(ModToolException, ModToolDisable(**test_dict).run)
-
-        ## Some tests to check if the command runs ##
- test_dict['blockname'] = 'square_ff'
- ModToolDisable(**test_dict).run()
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/gr-utils/python/modtool/tools/CMakeLists.txt b/gr-utils/python/modtool/tools/CMakeLists.txt
deleted file mode 100644
index 8248bb3e7d..0000000000
--- a/gr-utils/python/modtool/tools/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-
-include(GrPython)
-
-GR_PYTHON_INSTALL(FILES
- __init__.py
- cmakefile_editor.py
- code_generator.py
- grc_yaml_generator.py
- parser_cc_block.py
- scm.py
- util_functions.py
- DESTINATION ${GR_PYTHON_DIR}/gnuradio/modtool/tools
-)
diff --git a/gr-utils/python/modtool/tools/__init__.py b/gr-utils/python/modtool/tools/__init__.py
deleted file mode 100644
index 55a55fd166..0000000000
--- a/gr-utils/python/modtool/tools/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#
-# Copyright 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-from .cmakefile_editor import CMakeFileEditor
-from .code_generator import render_template
-from .grc_yaml_generator import GRCYAMLGenerator
-from .parser_cc_block import ParserCCBlock
-from .scm import SCMRepoFactory
-from .util_functions import *
\ No newline at end of file
diff --git a/gr-utils/python/modtool/tools/cmakefile_editor.py b/gr-utils/python/modtool/tools/cmakefile_editor.py
deleted file mode 100644
index 29f9c3eb09..0000000000
--- a/gr-utils/python/modtool/tools/cmakefile_editor.py
+++ /dev/null
@@ -1,140 +0,0 @@
-#
-# Copyright 2013, 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Edit CMakeLists.txt files """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import re
-import logging
-
-logger = logging.getLogger(__name__)
-
-
-class CMakeFileEditor(object):
- """A tool for editing CMakeLists.txt files. """
- def __init__(self, filename, separator='\n ', indent=' '):
- self.filename = filename
- with open(filename, 'r') as f:
- self.cfile = f.read()
- self.separator = separator
- self.indent = indent
-
- def append_value(self, entry, value, to_ignore_start='', to_ignore_end=''):
- """ Add a value to an entry. """
- regexp = re.compile(r'({}\({}[^()]*?)\s*?(\s?{})\)'.format(entry, to_ignore_start, to_ignore_end),
- re.MULTILINE)
- substi = r'\1' + self.separator + value + r'\2)'
- (self.cfile, nsubs) = regexp.subn(substi, self.cfile, count=1)
- return nsubs
-
- def remove_value(self, entry, value, to_ignore_start='', to_ignore_end=''):
- """
- Remove a value from an entry.
- Example: You want to remove file.cc from this list() entry:
- list(SOURCES
- file.cc
- other_file.cc
- )
-
- Then run:
- >>> C.remove_value('list', 'file.cc', 'SOURCES')
-
- Returns the number of occurrences of entry in the current file
- that were removed.
- """
- # In the case of the example above, these are cases we need to catch:
- # - list(file.cc ...
- # entry is right after the value parentheses, no whitespace. Can only happen
- # when to_ignore_start is empty.
- # - list(... file.cc)
- # Other entries come first, then entry is preceded by whitespace.
- # - list(SOURCES ... file.cc) # whitespace!
- # When to_ignore_start is not empty, entry must always be preceded by whitespace.
- if len(to_ignore_start) == 0:
- regexp = r'^\s*({entry}\((?:[^()]*?\s+|)){value}\s*([^()]*{to_ignore_end}\s*\)){to_ignore_start}'
- else:
- regexp = r'^\s*({entry}\(\s*{to_ignore_start}[^()]*?\s+){value}\s*([^()]*{to_ignore_end}\s*\))'
- regexp = regexp.format(
- entry=entry,
- to_ignore_start=to_ignore_start,
- value=value,
- to_ignore_end=to_ignore_end,
- )
- regexp = re.compile(regexp, re.MULTILINE)
- (self.cfile, nsubs) = re.subn(regexp, r'\1\2', self.cfile, count=1)
- return nsubs
-
- def delete_entry(self, entry, value_pattern=''):
- """Remove an entry from the current buffer."""
- regexp = r'{}\s*\([^()]*{}[^()]*\)[^\n]*\n'.format(entry, value_pattern)
- regexp = re.compile(regexp, re.MULTILINE)
- (self.cfile, nsubs) = re.subn(regexp, '', self.cfile, count=1)
- return nsubs
-
- def write(self):
- """ Write the changes back to the file. """
- with open(self.filename, 'w') as f:
- f.write(self.cfile)
-
- def remove_double_newlines(self):
- """Simply clear double newlines from the file buffer."""
- self.cfile = re.compile('\n\n\n+', re.MULTILINE).sub('\n\n', self.cfile)
-
- def find_filenames_match(self, regex):
- """ Find the filenames that match a certain regex
- on lines that aren't comments """
- filenames = []
- reg = re.compile(regex)
- fname_re = re.compile(r'[a-zA-Z]\w+\.\w{1,5}$')
- for line in self.cfile.splitlines():
- if len(line.strip()) == 0 or line.strip()[0] == '#':
- continue
- for word in re.split('[ /)(\t\n\r\f\v]', line):
- if fname_re.match(word) and reg.search(word):
- filenames.append(word)
- return filenames
-
- def disable_file(self, fname):
- """ Comment out a file.
- Example:
- add_library(
- file1.cc
- )
-
- Here, file1.cc becomes #file1.cc with disable_file('file1.cc').
- """
- starts_line = False
- for line in self.cfile.splitlines():
- if len(line.strip()) == 0 or line.strip()[0] == '#':
- continue
- if re.search(r'\b'+fname+r'\b', line):
- if re.match(fname, line.lstrip()):
- starts_line = True
- break
- comment_out_re = r'#\1' + '\n' + self.indent
- if not starts_line:
- comment_out_re = r'\n' + self.indent + comment_out_re
- (self.cfile, nsubs) = re.subn(r'(\b'+fname+r'\b)\s*', comment_out_re, self.cfile)
- if nsubs == 0:
-            logger.warning("Warning: A replacement failed when commenting out {}. Check the CMakeLists.txt manually.".format(fname))
- elif nsubs > 1:
-            logger.warning("Warning: Replaced {} {} times (instead of once). Check the CMakeLists.txt manually.".format(fname, nsubs))
-
- def comment_out_lines(self, pattern, comment_str='#'):
- """ Comments out all lines that match with pattern """
- for line in self.cfile.splitlines():
- if re.search(pattern, line):
- self.cfile = self.cfile.replace(line, comment_str+line)
-
- def check_for_glob(self, globstr):
- """ Returns true if a glob as in globstr is found in the cmake file """
- glob_re = r'GLOB\s[a-z_]+\s"{}"'.format(globstr.replace('*', r'\*'))
- return re.search(glob_re, self.cfile, flags=re.MULTILINE|re.IGNORECASE) is not None
diff --git a/gr-utils/python/modtool/tools/code_generator.py b/gr-utils/python/modtool/tools/code_generator.py
deleted file mode 100644
index fb0d7da686..0000000000
--- a/gr-utils/python/modtool/tools/code_generator.py
+++ /dev/null
@@ -1,52 +0,0 @@
-#
-# Copyright 2013-2014 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" A code generator (needed by ModToolAdd) """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-from mako.template import Template
-from ..templates import Templates
-from .util_functions import str_to_fancyc_comment
-from .util_functions import str_to_python_comment
-from .util_functions import strip_default_values
-from .util_functions import strip_arg_types
-from .util_functions import strip_arg_types_grc
-
-GRTYPELIST = {
- 'sync': 'sync_block',
- 'sink': 'sync_block',
- 'source': 'sync_block',
- 'decimator': 'sync_decimator',
- 'interpolator': 'sync_interpolator',
- 'general': 'block',
- 'tagged_stream': 'tagged_stream_block',
- 'hier': 'hier_block2',
- 'noblock': ''
-}
-
-def render_template(tpl_id, **kwargs):
- """ Return the parsed and rendered template given by tpl_id """
- # Choose template
- tpl = Template(Templates[tpl_id])
- # Set up all variables
- kwargs['str_to_fancyc_comment'] = str_to_fancyc_comment
- kwargs['str_to_python_comment'] = str_to_python_comment
- kwargs['strip_default_values'] = strip_default_values
- kwargs['strip_arg_types'] = strip_arg_types
- kwargs['strip_arg_types_grc'] = strip_arg_types_grc
- kwargs['grblocktype'] = GRTYPELIST[kwargs['blocktype']]
- if kwargs['is_component']:
- kwargs['include_dir_prefix'] = "gnuradio/" + kwargs['modname']
- else:
- kwargs['include_dir_prefix'] = kwargs['modname']
- # Render and return
- return tpl.render(**kwargs)
-
diff --git a/gr-utils/python/modtool/tools/grc_yaml_generator.py b/gr-utils/python/modtool/tools/grc_yaml_generator.py
deleted file mode 100644
index 07c00639cf..0000000000
--- a/gr-utils/python/modtool/tools/grc_yaml_generator.py
+++ /dev/null
@@ -1,129 +0,0 @@
-#
-# Copyright 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" A tool for generating YAML bindings """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-from collections import OrderedDict
-
-import yaml
-try:
- from yaml import CLoader as Loader, CDumper as Dumper
-except ImportError:
- from yaml import Loader, Dumper
-
-from .util_functions import is_number
-
-
-## setup dumper for dumping OrderedDict ##
-_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
-
-def dict_representer(dumper, data):
- """ Representer to represent special OrderedDict """
- return dumper.represent_dict(data.items())
-
-
-def dict_constructor(loader, node):
- """ Construct an OrderedDict for dumping """
- return OrderedDict(loader.construct_pairs(node))
-
-Dumper.add_representer(OrderedDict, dict_representer)
-Loader.add_constructor(_mapping_tag, dict_constructor)
-
-
-class GRCYAMLGenerator(object):
- """ Create and write the YAML bindings for a GRC block. """
- def __init__(self, modname=None, blockname=None, doc=None, params=None, iosig=None):
-        """ Initialize the YAML generator from the block's metadata. """
- params_list = ['${'+s['key']+'}' for s in params if s['in_constructor']]
- # Can't make a dict 'cause order matters
- self._header = (('id', '{}_{}'.format(modname, blockname)),
- ('label', blockname.replace('_', ' ')),
- ('category', '[{}]'.format(modname.capitalize()))
- )
- self._templates = (('imports', 'import {}'.format(modname)),
- ('make', '{}.{}({})'.format(modname, blockname, ', '.join(params_list)))
- )
- self.params = params
- self.iosig = iosig
- self.doc = doc
- self.data = None
-
- def make_yaml(self):
- """ Create the actual tag tree """
- data = OrderedDict()
- for tag, value in self._header:
- data[tag] = value
-
- templates = OrderedDict()
- for tag, value in self._templates:
- templates[tag] = value
-
- data['templates'] = templates
-
- parameters = []
- for param in self.params:
- parameter = OrderedDict()
- parameter['id'] = param['key']
- parameter['label'] = param['key'].capitalize()
- if param['default']:
- parameter['default'] = param['default']
- parameter['dtype'] = param['type']
- parameters.append(parameter)
-
- if parameters:
- data['parameters'] = parameters
-
- inputs = []
- outputs = []
- iosig = self.iosig
- for inout in sorted(iosig.keys()):
- if iosig[inout]['max_ports'] == '0':
- continue
- for i in range(len(iosig[inout]['type'])):
- s_type = {'in': 'input', 'out': 'output'}[inout]
- s_obj = OrderedDict()
- s_obj['label'] = inout
- s_obj['domain'] = 'stream'
- s_obj['dtype'] = iosig[inout]['type'][i]
- if iosig[inout]['vlen'][i] != '1':
- vlen = iosig[inout]['vlen'][i]
- if is_number(vlen):
- s_obj['vlen'] = vlen
- else:
- s_obj['vlen'] = '${ '+vlen+' }'
- if i == len(iosig[inout]['type'])-1:
- if not is_number(iosig[inout]['max_ports']):
- s_obj['multiplicity'] = iosig[inout]['max_ports']
- elif len(iosig[inout]['type']) < int(iosig[inout]['max_ports']):
- s_obj['multiplicity'] = str(int(iosig[inout]['max_ports']) -
- len(iosig[inout]['type'])+1)
- if s_type == 'input':
- inputs.append(s_obj)
- elif s_type == 'output':
- outputs.append(s_obj)
-
- if inputs:
- data['inputs'] = inputs
-
- if outputs:
- data['outputs'] = outputs
-
- if self.doc is not None:
- data['documentation'] = self.doc
- self.data = data
- data['file_format'] = 1
-
- def save(self, filename):
- """ Write the YAML file """
- self.make_yaml()
- with open(filename, 'w') as f:
- yaml.dump(self.data, f, Dumper=Dumper, default_flow_style=False)
diff --git a/gr-utils/python/modtool/tools/parser_cc_block.py b/gr-utils/python/modtool/tools/parser_cc_block.py
deleted file mode 100644
index 81c10f0cdc..0000000000
--- a/gr-utils/python/modtool/tools/parser_cc_block.py
+++ /dev/null
@@ -1,223 +0,0 @@
-#
-# Copyright 2013, 2018 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-''' A parser for blocks written in C++ '''
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import re
-import sys
-import logging
-
-logger = logging.getLogger(__name__)
-
-def dummy_translator(the_type, default_v=None):
- """ Doesn't really translate. """
- return the_type
-
-class ParserCCBlock(object):
- """ Class to read blocks written in C++ """
- def __init__(self, filename_cc, filename_h, blockname, version, type_trans=dummy_translator):
- with open(filename_cc) as f:
- self.code_cc = f.read()
- with open(filename_h) as f:
- self.code_h = f.read()
- self.blockname = blockname
- self.type_trans = type_trans
- self.version = version
-
- def read_io_signature(self):
- """ Scans a .cc file for an IO signature. """
- def _figure_out_iotype_and_vlen(iosigcall, typestr):
- """ From a type identifier, returns the data type.
- E.g., for sizeof(int), it will return 'int'.
- Returns a list! """
- if 'gr::io_signature::makev' in iosigcall:
- logger.error('tbi')
- raise ValueError
- return {'type': [_typestr_to_iotype(x) for x in typestr.split(',')],
- 'vlen': [_typestr_to_vlen(x) for x in typestr.split(',')]
- }
- def _typestr_to_iotype(typestr):
- """ Convert a type string (e.g. sizeof(int) * vlen) to the type (e.g. 'int'). """
- type_match = re.search(r'sizeof\s*\(([^)]*)\)', typestr)
- if type_match is None:
- return self.type_trans('char')
- return self.type_trans(type_match.group(1))
- def _typestr_to_vlen(typestr):
- """ From a type identifier, returns the vector length of the block's
- input/out. E.g., for 'sizeof(int) * 10', it returns 10. For
- 'sizeof(int)', it returns '1'. For 'sizeof(int) * vlen', it returns
- the string vlen. """
- # Catch fringe case where no sizeof() is given
- if typestr.find('sizeof') == -1:
- return typestr
- if typestr.find('*') == -1:
- return '1'
- vlen_parts = typestr.split('*')
- for fac in vlen_parts:
- if fac.find('sizeof') != -1:
- vlen_parts.remove(fac)
- if len(vlen_parts) == 1:
- return vlen_parts[0].strip()
- elif len(vlen_parts) > 1:
- return '*'.join(vlen_parts).strip()
- iosig = {}
- iosig_regex = r'(?P<incall>gr::io_signature::make[23v]?)\s*\(\s*(?P<inmin>[^,]+),\s*(?P<inmax>[^,]+),' + \
- r'\s*(?P<intype>(\([^\)]*\)|[^)])+)\),\s*' + \
- r'(?P<outcall>gr::io_signature::make[23v]?)\s*\(\s*(?P<outmin>[^,]+),\s*(?P<outmax>[^,]+),' + \
- r'\s*(?P<outtype>(\([^\)]*\)|[^)])+)\)'
- iosig_match = re.compile(iosig_regex, re.MULTILINE).search(self.code_cc)
- try:
- iosig['in'] = _figure_out_iotype_and_vlen(iosig_match.group('incall'),
- iosig_match.group('intype'))
- iosig['in']['min_ports'] = iosig_match.group('inmin')
- iosig['in']['max_ports'] = iosig_match.group('inmax')
- except Exception:
- logger.error("Error: Can't parse input signature.")
- try:
- iosig['out'] = _figure_out_iotype_and_vlen(iosig_match.group('outcall'),
- iosig_match.group('outtype'))
- iosig['out']['min_ports'] = iosig_match.group('outmin')
- iosig['out']['max_ports'] = iosig_match.group('outmax')
- except Exception:
- logger.error("Error: Can't parse output signature.")
- return iosig
-
-
- def read_params(self):
- """ Read the parameters required to initialize the block """
- def _scan_param_list(start_idx):
-            """ Go through a parameter list and return a tuple for each parameter:
- (type, name, default_value). Python's re just doesn't cut
- it for C++ code :( """
- i = start_idx
- c = self.code_h
- if c[i] != '(':
- raise ValueError
- i += 1
-
- param_list = []
- read_state = 'type'
- in_string = False
- parens_count = 0 # Counts ()
- brackets_count = 0 # Counts <>
- end_of_list = False
- this_type = ''
- this_name = ''
- this_defv = ''
- WHITESPACE = ' \t\n\r\f\v'
- while not end_of_list:
- # Keep track of (), stop when reaching final closing parens
- if not in_string:
- if c[i] == ')':
- if parens_count == 0:
- if read_state == 'type' and len(this_type):
- raise ValueError(
- 'Found closing parentheses before finishing '
- 'last argument (this is how far I got: {})'.format \
- (str(param_list))
- )
- if len(this_type):
- param_list.append((this_type, this_name, this_defv))
- end_of_list = True
- break
- else:
- parens_count -= 1
- elif c[i] == '(':
- parens_count += 1
- # Parameter type (int, const std::string, std::vector<gr_complex>, unsigned long ...)
- if read_state == 'type':
- if c[i] == '<':
- brackets_count += 1
- if c[i] == '>':
- brackets_count -= 1
- if c[i] == '&':
- i += 1
- continue
- if c[i] in WHITESPACE and brackets_count == 0:
- while c[i] in WHITESPACE:
- i += 1
- continue
- if this_type == 'const' or this_type == '': # Ignore this
- this_type = ''
- elif this_type == 'unsigned': # Continue
- this_type += ' '
- continue
- else:
- read_state = 'name'
- continue
- this_type += c[i]
- i += 1
- continue
- # Parameter name
- if read_state == 'name':
- if c[i] == '&' or c[i] in WHITESPACE:
- i += 1
- elif c[i] == '=':
- if parens_count != 0:
- raise ValueError(
- 'While parsing argument {} ({}): name finished but no closing parentheses.'.format \
- (len(param_list)+1, this_type + ' ' + this_name)
- )
- read_state = 'defv'
- i += 1
- elif c[i] == ',':
- if parens_count:
- raise ValueError(
- 'While parsing argument {} ({}): name finished but no closing parentheses.'.format \
- (len(param_list)+1, this_type + ' ' + this_name)
- )
- read_state = 'defv'
- else:
- this_name += c[i]
- i += 1
- continue
- # Default value
- if read_state == 'defv':
- if in_string:
- if c[i] == '"' and c[i-1] != '\\':
- in_string = False
- else:
- this_defv += c[i]
- elif c[i] == ',':
- if parens_count:
- raise ValueError(
- 'While parsing argument {} ({}): default value finished but no closing parentheses.'.format \
- (len(param_list)+1, this_type + ' ' + this_name)
- )
- read_state = 'type'
- param_list.append((this_type, this_name, this_defv))
- this_type = ''
- this_name = ''
- this_defv = ''
- else:
- this_defv += c[i]
- i += 1
- continue
- return param_list
- # Go, go, go!
- if self.version in ('37', '38'):
- make_regex = r'static\s+sptr\s+make\s*'
- else:
- make_regex = r'(?<=_API)\s+\w+_sptr\s+\w+_make_\w+\s*'
- make_match = re.compile(make_regex, re.MULTILINE).search(self.code_h)
- try:
- params_list = _scan_param_list(make_match.end(0))
- except ValueError as ve:
-            logger.error("Can't parse the argument list: %s", ve.args[0])
- sys.exit(0)
- params = []
- for plist in params_list:
- params.append({'type': self.type_trans(plist[0], plist[2]),
- 'key': plist[1],
- 'default': plist[2],
- 'in_constructor': True})
- return params
diff --git a/gr-utils/python/modtool/tools/scm.py b/gr-utils/python/modtool/tools/scm.py
deleted file mode 100644
index 707ed6754c..0000000000
--- a/gr-utils/python/modtool/tools/scm.py
+++ /dev/null
@@ -1,223 +0,0 @@
-#
-# Copyright 2013 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Class to handle source code management repositories. """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import logging
-import subprocess
-
-logger = logging.getLogger(__name__)
-
-try:
- import git
- HAS_GITPYTHON = True
-except ImportError:
- HAS_GITPYTHON = False
-# GitPython is a bit too unstable currently
-HAS_GITPYTHON = False
-
-class InvalidSCMError(Exception):
- """ Exception for when trying to access a repo of wrong type. """
- def __init__(self):
- Exception.__init__(self)
-
-### Base class ###############################################################
-class SCMRepository(object):
- """ Base class to handle interactions with source code management systems. """
- handles_scm_type = '*'
- def __init__(self, path_to_repo, is_empty=False):
- self.path_to_repo = path_to_repo
- self.is_empty = is_empty
-
- def init_repo(self, path_to_repo=None, add_files=True):
- """ Initialize the directory as a repository. Assumes the self.path_to_repo
- (or path_to_repo, if specified) does *not* contain a valid repository.
- If add_files is True, all files in this directory are added to version control.
- Returns true if actually created a repo.
- """
- if path_to_repo is not None:
- self.path_to_repo = path_to_repo
- return False
-
- def add_files(self, paths_to_files):
- """ Add a tuple or list of files to the current repository. """
- pass
-
- def add_file(self, path_to_file):
- """ Add a file to the current repository. """
- self.add_files([path_to_file])
-
- def remove_files(self, paths_to_files):
- """ Remove a tuple or list of files from the current repository. """
- pass
-
- def remove_file(self, path_to_file):
- """ Remove a file from the current repository. """
- self.remove_files([path_to_file])
-
- def mark_files_updated(self, paths_to_files):
-        """ Mark a list or tuple of files as changed. """
- pass
-
- def mark_file_updated(self, path_to_file):
- """ Mark a file as changed. """
- self.mark_files_updated([path_to_file])
-
- def is_active(self):
- """ Returns true if this repository manager is operating on an active, source-controlled directory. """
- return self.is_empty
-
- def get_gituser(self):
- """ Gets the git user """
- try:
- return (subprocess.check_output('git config --global user.name', shell=True).strip()).decode('utf-8')
- except (OSError, subprocess.CalledProcessError):
- return None
-
-
-### Git #####################################################################
-class GitManagerGitPython(object):
- """ Manage git through GitPython (preferred way). """
- def __init__(self, path_to_repo, init=False):
- if init:
- self.repo = git.Repo.init(path_to_repo, mkdir=False)
- else:
- try:
- self.repo = git.Repo(path_to_repo)
- except git.InvalidGitRepositoryError:
- self.repo = None
- raise InvalidSCMError
- self.index = self.repo.index
-
- def add_files(self, paths_to_files):
- """ Adds a tuple of files to the index of the current repository. """
- if self.repo is not None:
- self.index.add(paths_to_files)
-
- def remove_files(self, paths_to_files):
- """ Removes a tuple of files from the index of the current repository. """
- if self.repo is not None:
- self.index.remove(paths_to_files)
-
-
-class GitManagerShell(object):
- """ Call the git executable through a shell. """
- def __init__(self, path_to_repo, init=False, git_executable=None):
- self.path_to_repo = path_to_repo
- if git_executable is None:
- try:
- self.git_executable = subprocess.check_output('which git', shell=True).strip()
- except (OSError, subprocess.CalledProcessError):
- raise InvalidSCMError
- try:
- if init:
- subprocess.check_output([self.git_executable, 'init'])
- else:
- subprocess.check_output([self.git_executable, 'status'])
- except OSError:
- raise InvalidSCMError
- except subprocess.CalledProcessError:
- raise InvalidSCMError
-
- def add_files(self, paths_to_files):
- """ Adds a tuple of files to the index of the current repository. Does not commit. """
- subprocess.check_output([self.git_executable, 'add'] + list(paths_to_files))
-
- def remove_files(self, paths_to_files):
- """ Removes a tuple of files from the index of the current repository. Does not commit. """
- subprocess.check_output([self.git_executable, 'rm', '--cached'] + list(paths_to_files))
-
-
-class GitRepository(SCMRepository):
- """ Specific to operating on git repositories. """
- handles_scm_type = 'git'
- def __init__(self, path_to_repo, is_empty=False):
- SCMRepository.__init__(self, path_to_repo, is_empty)
- if not is_empty:
- try:
- if HAS_GITPYTHON:
- self.repo_manager = GitManagerGitPython(path_to_repo)
- else:
- self.repo_manager = GitManagerShell(path_to_repo)
- except InvalidSCMError:
- self.repo_manager = None
- else:
- self.repo_manager = None
-
- def init_repo(self, path_to_repo=None, add_files=True):
- """ Makes the directory in self.path_to_repo a git repo.
-        If add_files is True, all files in this dir are added to the index. """
- SCMRepository.init_repo(self, path_to_repo, add_files)
- if HAS_GITPYTHON:
- self.repo_manager = GitManagerGitPython(self.path_to_repo, init=True)
- else:
- self.repo_manager = GitManagerShell(self.path_to_repo, init=True)
- if add_files:
- self.add_files(('*',))
- return True
-
- def add_files(self, paths_to_files):
- """ Add a file to the current repository. Does not commit. """
- self.repo_manager.add_files(paths_to_files)
-
- def remove_files(self, paths_to_files):
- """ Remove a file from the current repository. Does not commit. """
- self.repo_manager.remove_files(paths_to_files)
-
- def mark_files_updated(self, paths_to_files):
- """ Mark a file as changed. Since this is git, same as adding new files. """
- self.add_files(paths_to_files)
-
- def is_active(self):
- return self.repo_manager is not None
-
-
-##############################################################################
-### Factory ##################################################################
-class SCMRepoFactory(object):
- """ Factory object to create the correct SCM class from the given options and dir. """
- def __init__(self, options, path_to_repo):
- self.path_to_repo = path_to_repo
- self.options = options
-
- def make_active_scm_manager(self):
- """ Returns a valid, usable object of type SCMRepository. """
- if self.options.scm_mode == 'no':
- return SCMRepository(self.path_to_repo)
- for glbl in list(globals().values()):
- try:
- if issubclass(glbl, SCMRepository):
- the_scm = glbl(self.path_to_repo)
- if the_scm.is_active():
-                        logger.info('Found SCM of type: %s', the_scm.handles_scm_type)
- return the_scm
- except (TypeError, AttributeError, InvalidSCMError):
- pass
-        if self.options.scm_mode == 'yes':
- return None
- return SCMRepository(self.path_to_repo)
-
- def make_empty_scm_manager(self, scm_type='git'):
- """ Returns a valid, usable object of type SCMRepository for an uninitialized dir. """
- if self.options.scm_mode == 'no':
- return SCMRepository(self.path_to_repo)
- for glbl in list(globals().values()):
- try:
- if issubclass(glbl, SCMRepository):
- if glbl.handles_scm_type == scm_type:
- return glbl(self.path_to_repo, is_empty=True)
- except (TypeError, AttributeError, InvalidSCMError):
- pass
-        if self.options.scm_mode == 'yes':
- return None
- return SCMRepository(self.path_to_repo)
-
diff --git a/gr-utils/python/modtool/tools/util_functions.py b/gr-utils/python/modtool/tools/util_functions.py
deleted file mode 100644
index d7f2b11317..0000000000
--- a/gr-utils/python/modtool/tools/util_functions.py
+++ /dev/null
@@ -1,167 +0,0 @@
-#
-# Copyright 2013, 2018, 2019 Free Software Foundation, Inc.
-#
-# This file is part of GNU Radio
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-#
-""" Utility functions for gr_modtool """
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import re
-import sys
-import readline
-
-# None of these must depend on other modtool stuff!
-
-def append_re_line_sequence(filename, linepattern, newline):
- """ Detects the re 'linepattern' in the file. After its last occurrence,
- paste 'newline'. If the pattern does not exist, append the new line
- to the file. Then, write. """
- with open(filename, 'r') as f:
- oldfile = f.read()
- lines = re.findall(linepattern, oldfile, flags=re.MULTILINE)
- if len(lines) == 0:
- with open(filename, 'a') as f:
- f.write(newline)
- return
- last_line = lines[-1]
- newfile = oldfile.replace(last_line, last_line + newline + '\n')
- with open(filename, 'w') as f:
- f.write(newfile)
-
-def remove_pattern_from_file(filename, pattern):
- """ Remove all occurrences of a given pattern from a file. """
- with open(filename, 'r') as f:
- oldfile = f.read()
- pattern = re.compile(pattern, re.MULTILINE)
- with open(filename, 'w') as f:
- f.write(pattern.sub('', oldfile))
-
-def str_to_fancyc_comment(text):
- """ Return a string as a C formatted comment. """
- l_lines = text.splitlines()
- if len(l_lines[0]) == 0:
- outstr = "/*\n"
- else:
- outstr = "/* " + l_lines[0] + "\n"
- for line in l_lines[1:]:
- if len(line) == 0:
- outstr += " *\n"
- else:
- outstr += " * " + line + "\n"
- outstr += " */\n"
- return outstr
-
-def str_to_python_comment(text):
- """ Return a string as a Python formatted comment. """
- l_lines = text.splitlines()
- if len(l_lines[0]) == 0:
- outstr = "#\n"
- else:
- outstr = "# " + l_lines[0] + "\n"
- for line in l_lines[1:]:
- if len(line) == 0:
- outstr += "#\n"
- else:
- outstr += "# " + line + "\n"
- outstr += "#\n"
- return outstr
-
-def strip_default_values(string):
- """ Strip default values from a C++ argument list. """
- return re.sub(' *=[^,)]*', '', string)
-
-def strip_arg_types(string):
-    """
- Strip the argument types from a list of arguments.
- Example: "int arg1, double arg2" -> "arg1, arg2"
- Note that some types have qualifiers, which also are part of
- the type, e.g. "const std::string &name" -> "name", or
- "const char *str" -> "str".
- """
- string = strip_default_values(string)
- return ", ".join(
- [part.strip().split(' ')[-1] for part in string.split(',')]
- ).replace('*','').replace('&','')
-
-def strip_arg_types_grc(string):
-    """ Strip the argument types from a list of arguments for GRC make tag.
-    Example: "int arg1, double arg2" -> "${arg1}, ${arg2}" """
- if len(string) == 0:
- return ""
- else:
- string = strip_default_values(string)
- return ", ".join(['${' + part.strip().split(' ')[-1] + '}' for part in string.split(',')])
-
-def get_modname():
- """ Grep the current module's name from gnuradio.project or CMakeLists.txt """
- modname_trans = {'howto-write-a-block': 'howto'}
- try:
- with open('gnuradio.project', 'r') as f:
- prfile = f.read()
- regexp = r'projectname\s*=\s*([a-zA-Z0-9-_]+)$'
- return re.search(regexp, prfile, flags=re.MULTILINE).group(1).strip()
- except IOError:
- pass
- # OK, there's no gnuradio.project. So, we need to guess.
- with open('CMakeLists.txt', 'r') as f:
- cmfile = f.read()
- regexp = r'(project\s*\(\s*|GR_REGISTER_COMPONENT\(")gr-(?P<modname>[a-zA-Z0-9-_]+)(\s*(CXX)?|" ENABLE)'
- try:
- modname = re.search(regexp, cmfile, flags=re.MULTILINE).group('modname').strip()
- if modname in list(modname_trans.keys()):
- modname = modname_trans[modname]
- return modname
- except AttributeError:
- return None
-
-def is_number(s):
- """ Return True if the string s contains a number. """
- try:
- float(s)
- return True
- except ValueError:
- return False
-
-def ask_yes_no(question, default):
- """ Asks a binary question. Returns True for yes, False for no.
- default is given as a boolean. """
- question += {True: ' [Y/n] ', False: ' [y/N] '}[default]
- if input(question).lower() != {True: 'n', False: 'y'}[default]:
- return default
- else:
- return not default
-
-class SequenceCompleter(object):
- """ A simple completer function wrapper to be used with readline, e.g.
- option_iterable = ("search", "seek", "destroy")
- readline.set_completer(SequenceCompleter(option_iterable).completefunc)
-
- Typical usage is with the `with` statement. Restores the previous completer
- at exit, thus nestable.
- """
-
- def __init__(self, sequence=None):
- self._seq = sequence or []
- self._tmp_matches = []
-
- def completefunc(self, text, state):
- if not text and state < len(self._seq):
- return self._seq[state]
- if not state:
- self._tmp_matches = [candidate for candidate in self._seq if candidate.startswith(text)]
- if state < len(self._tmp_matches):
- return self._tmp_matches[state]
-
- def __enter__(self):
- self._old_completer = readline.get_completer()
- readline.set_completer(self.completefunc)
- readline.parse_and_bind("tab: complete")
-
- def __exit__(self, exception_type, exception_value, traceback):
- readline.set_completer(self._old_completer)